Linux Audio

Check our new training course

Loading...
v3.1
   1/********************************************************************
   2 Filename:      via-ircc.c
   3 Version:       1.0 
   4 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
   5 Author:        VIA Technologies,inc
   6 Date  :	08/06/2003
   7
   8Copyright (c) 1998-2003 VIA Technologies, Inc.
   9
  10This program is free software; you can redistribute it and/or modify it under
  11the terms of the GNU General Public License as published by the Free Software
  12Foundation; either version 2, or (at your option) any later version.
  13
  14This program is distributed in the hope that it will be useful, but WITHOUT
  15ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
  16MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  17See the GNU General Public License for more details.
  18
  19You should have received a copy of the GNU General Public License along with
  20this program; if not, write to the Free Software Foundation, Inc.,
  2159 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  22
  23F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
  24F02 Oct/28/02: Add SB device ID for 3147 and 3177.
  25 Comment :
  26       jul/09/2002 : only implement two kind of dongle currently.
  27       Oct/02/2002 : work on VT8231 and VT8233 .
  28       Aug/06/2003 : change driver format to pci driver .
  29
  302004-02-16: <sda@bdit.de>
  31- Removed unneeded 'legacy' pci stuff.
  32- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
  33- On speed change from core, don't send SIR frame with new speed. 
  34  Use current speed and change speeds later.
  35- Make module-param dongle_id actually work.
  36- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only. 
  37  Tested with home-grown PCB on EPIA boards.
  38- Code cleanup.
  39       
  40 ********************************************************************/
  41#include <linux/module.h>
  42#include <linux/kernel.h>
  43#include <linux/types.h>
  44#include <linux/skbuff.h>
  45#include <linux/netdevice.h>
  46#include <linux/ioport.h>
  47#include <linux/delay.h>
  48#include <linux/init.h>
  49#include <linux/interrupt.h>
  50#include <linux/rtnetlink.h>
  51#include <linux/pci.h>
  52#include <linux/dma-mapping.h>
  53#include <linux/gfp.h>
  54
  55#include <asm/io.h>
  56#include <asm/dma.h>
  57#include <asm/byteorder.h>
  58
  59#include <linux/pm.h>
  60
  61#include <net/irda/wrapper.h>
  62#include <net/irda/irda.h>
  63#include <net/irda/irda_device.h>
  64
  65#include "via-ircc.h"
  66
  67#define VIA_MODULE_NAME "via-ircc"
  68#define CHIP_IO_EXTENT 0x40
  69
  70static char *driver_name = VIA_MODULE_NAME;
  71
  72/* Module parameters */
  73static int qos_mtt_bits = 0x07;	/* 1 ms or more */
  74static int dongle_id = 0;	/* default: probe */
  75
  76/* We can't guess the type of connected dongle, user *must* supply it. */
  77module_param(dongle_id, int, 0);
  78
  79/* Some prototypes */
  80static int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
  81			 unsigned int id);
  82static int via_ircc_dma_receive(struct via_ircc_cb *self);
  83static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
  84					 int iobase);
  85static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
  86						struct net_device *dev);
  87static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
  88						struct net_device *dev);
  89static void via_hw_init(struct via_ircc_cb *self);
  90static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
  91static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
  92static int via_ircc_is_receiving(struct via_ircc_cb *self);
  93static int via_ircc_read_dongle_id(int iobase);
  94
  95static int via_ircc_net_open(struct net_device *dev);
  96static int via_ircc_net_close(struct net_device *dev);
  97static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
  98			      int cmd);
  99static void via_ircc_change_dongle_speed(int iobase, int speed,
 100					 int dongle_id);
 101static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
 102static void hwreset(struct via_ircc_cb *self);
 103static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
 104static int upload_rxdata(struct via_ircc_cb *self, int iobase);
 105static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id);
 106static void __devexit via_remove_one (struct pci_dev *pdev);
 107
 108/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
 109static void iodelay(int udelay)
 110{
 111	u8 data;
 112	int i;
 113
 114	for (i = 0; i < udelay; i++) {
 115		data = inb(0x80);
 116	}
 117}
 118
 119static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
 120	{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
 121	{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
 122	{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
 123	{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
 124	{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
 125	{ 0, }
 126};
 127
 128MODULE_DEVICE_TABLE(pci,via_pci_tbl);
 129
 130
/* PCI driver glue: binds the IDs in via_pci_tbl to probe/remove handlers. */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= __devexit_p(via_remove_one),
};
 137
 138
 139/*
 140 * Function via_ircc_init ()
 141 *
 142 *    Initialize chip. Just find out chip type and resource.
 143 */
 144static int __init via_ircc_init(void)
 145{
 146	int rc;
 147
 148	IRDA_DEBUG(3, "%s()\n", __func__);
 149
 150	rc = pci_register_driver(&via_driver);
 151	if (rc < 0) {
 152		IRDA_DEBUG(0, "%s(): error rc = %d, returning  -ENODEV...\n",
 153			   __func__, rc);
 154		return -ENODEV;
 155	}
 156	return 0;
 157}
 158
 159static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id)
 160{
 161	int rc;
 162        u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
 163	u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
 164	chipio_t info;
 165
 166	IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
 167
 168	rc = pci_enable_device (pcidev);
 169	if (rc) {
 170		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
 171		return -ENODEV;
 172	}
 173
 174	// South Bridge exist
 175        if ( ReadLPCReg(0x20) != 0x3C )
 176		Chipset=0x3096;
 177	else
 178		Chipset=0x3076;
 179
 180	if (Chipset==0x3076) {
 181		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
 182
 183		WriteLPCReg(7,0x0c );
 184		temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
 185		if((temp&0x01)==1) {   // BIOS close or no FIR
 186			WriteLPCReg(0x1d, 0x82 );
 187			WriteLPCReg(0x23,0x18);
 188			temp=ReadLPCReg(0xF0);
 189			if((temp&0x01)==0) {
 190				temp=(ReadLPCReg(0x74)&0x03);    //DMA
 191				FirDRQ0=temp + 4;
 192				temp=(ReadLPCReg(0x74)&0x0C) >> 2;
 193				FirDRQ1=temp + 4;
 194			} else {
 195				temp=(ReadLPCReg(0x74)&0x0C) >> 2;    //DMA
 196				FirDRQ0=temp + 4;
 197				FirDRQ1=FirDRQ0;
 198			}
 199			FirIRQ=(ReadLPCReg(0x70)&0x0f);		//IRQ
 200			FirIOBase=ReadLPCReg(0x60 ) << 8;	//IO Space :high byte
 201			FirIOBase=FirIOBase| ReadLPCReg(0x61) ;	//low byte
 202			FirIOBase=FirIOBase  ;
 203			info.fir_base=FirIOBase;
 204			info.irq=FirIRQ;
 205			info.dma=FirDRQ1;
 206			info.dma2=FirDRQ0;
 207			pci_read_config_byte(pcidev,0x40,&bTmp);
 208			pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
 209			pci_read_config_byte(pcidev,0x42,&bTmp);
 210			pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
 211			pci_write_config_byte(pcidev,0x5a,0xc0);
 212			WriteLPCReg(0x28, 0x70 );
 213			if (via_ircc_open(pcidev, &info, 0x3076) == 0)
 214				rc=0;
 215		} else
 216			rc = -ENODEV; //IR not turn on	 
 217	} else { //Not VT1211
 218		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
 219
 220		pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
 221		if((bTmp&0x01)==1) {  // BIOS enable FIR
 222			//Enable Double DMA clock
 223			pci_read_config_byte(pcidev,0x42,&oldPCI_40);
 224			pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
 225			pci_read_config_byte(pcidev,0x40,&oldPCI_40);
 226			pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
 227			pci_read_config_byte(pcidev,0x44,&oldPCI_44);
 228			pci_write_config_byte(pcidev,0x44,0x4e);
 229  //---------- read configuration from Function0 of south bridge
 230			if((bTmp&0x02)==0) {
 231				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
 232				FirDRQ0 = (bTmp1 & 0x30) >> 4;
 233				pci_read_config_byte(pcidev,0x44,&bTmp1);
 234				FirDRQ1 = (bTmp1 & 0xc0) >> 6;
 235			} else  {
 236				pci_read_config_byte(pcidev,0x44,&bTmp1);    //DMA
 237				FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
 238				FirDRQ1=0;
 239			}
 240			pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
 241			FirIRQ = bTmp1 & 0x0f;
 242
 243			pci_read_config_byte(pcidev,0x69,&bTmp);
 244			FirIOBase = bTmp << 8;//hight byte
 245			pci_read_config_byte(pcidev,0x68,&bTmp);
 246			FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
 247  //-------------------------
 248			info.fir_base=FirIOBase;
 249			info.irq=FirIRQ;
 250			info.dma=FirDRQ1;
 251			info.dma2=FirDRQ0;
 252			if (via_ircc_open(pcidev, &info, 0x3096) == 0)
 253				rc=0;
 254		} else
 255			rc = -ENODEV; //IR not turn on !!!!!
 256	}//Not VT1211
 257
 258	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
 259	return rc;
 260}
 261
 262static void __exit via_ircc_cleanup(void)
 263{
 264	IRDA_DEBUG(3, "%s()\n", __func__);
 265
 266	/* Cleanup all instances of the driver */
 267	pci_unregister_driver (&via_driver); 
 268}
 269
/* netdev ops installed while running at SIR speeds (<= 115200 baud);
 * via_ircc_change_speed() switches between this and via_ircc_fir_ops. */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
/* netdev ops installed at MIR/FIR speeds (> 115200 baud); only the
 * transmit handler differs from via_ircc_sir_ops. */
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
 282
/*
 * Function via_ircc_open(pdev, info, id)
 *
 *    Open driver instance: allocate the netdev and private state, claim
 *    the I/O region, set up QoS capabilities and coherent DMA rx/tx
 *    buffers, register the netdev, then bring the chip up in SIR mode.
 *
 *    Returns 0 on success or a negative errno; on failure everything
 *    acquired so far is unwound through the err_out* labels.
 */
static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
				   unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL) 
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	/* Stash the instance so via_remove_one() can retrieve it. */
	pci_set_drvdata(pdev, self);

	/* Initialize Resource */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}
	
	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Check if user has supplied the dongle id or not.
	 * NOTE(review): the probed value is written back into the module
	 * parameter, so a second device inherits the first device's id --
	 * confirm whether that is intended. */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* The only value we must override is the baudrate. */
	/* Maximum speeds and capabilities are dongle-dependent. */
	switch( self->io.dongle_id ){
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	/* Following was used for testing:
	 *
	 *   self->qos.baud_rate.bits = IR_9600;
	 *
	 * It is no good, as it prohibits (error-prone) speed-changes.
	 */

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use.  Start with the
	 * SIR ops; via_ircc_change_speed() swaps in the FIR ops for
	 * speeds above 115200. */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Initialise the hardware: default to 9600 baud SIR.
	*/
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return err;
}
 420
/*
 * Function via_remove_one(pdev)
 *
 *    Close driver instance: hardware-reset the chip, unregister the
 *    netdev, then release the I/O region, DMA buffers and PCI device
 *    acquired during probe (mirror image of via_ircc_open()).
 */
static void __devexit via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);	//hardware reset.
	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	/* Buffers may be NULL if probe failed before allocating them. */
	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	pci_set_drvdata(pdev, NULL);

	free_netdev(self->netdev);

	pci_disable_device(pdev);
}
 456
/*
 * Function via_hw_init(self)
 *
 *    Full hardware bring-up: mask FIFO interrupts, reset the chip and
 *    leave it configured for 9600 baud SIR with the dongle informed of
 *    that speed.
 *
 * Formerly via_ircc_setup 
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
	// FIFO Init: disable all FIFO level interrupts except TX underrun/EOM
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	/* LPC reg 0x20 == 0x3c identifies the VT1211 (see via_init_one). */
	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	// for VT1211
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	/* Disable IR while the baud rate is reprogrammed ... */
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	/* ... then re-enable IR. */
	WriteReg(iobase, I_ST_CT_0, 0x80);
}
 514
 515/*
 516 * Function via_ircc_read_dongle_id (void)
 517 *
 518 */
 519static int via_ircc_read_dongle_id(int iobase)
 520{
 521	int dongle_id = 9;	/* Default to IBM */
 522
 523	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
 524	return dongle_id;
 525}
 526
/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change speed of the attached dongle; only a handful of dongle
 *    types are implemented.  The mode actually programmed is derived
 *    from the chip state (IsSIROn/IsMIROn/IsFIROn), not from `speed`,
 *    which is only used for the debug message.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	/* speed is unused, as we use IsSIROn()/IsMIROn() */
	speed = speed;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);

	switch (dongle_id) {

		/* Note: The dongle_id's listed here are derived from
		 * nsc-ircc.c */ 

	case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		UseOneRX(iobase, ON);	// use one RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {	//sir
			// Mode select Off: pulse the mode-select line
			SlowIRRXLowActive(iobase, ON);
			udelay(1000);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			if (IsMIROn(iobase)) {	//mir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				udelay(20);
			} else {	// fir
				if (IsFIROn(iobase)) {	//fir
					// Mode select On
					SlowIRRXLowActive(iobase, OFF);
					udelay(20);
				}
			}
		}
		break;

	case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);	// invert RX pin

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);
		if (IsSIROn(iobase)) {	//sir
			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);
		}
		/* NOTE(review): no `else` here, unlike case 0x08 -- the MIR
		 * branch below is also evaluated after the SIR pulse.
		 * Confirm this asymmetry is intentional. */
		if (IsMIROn(iobase)) {	//mir
			// Mode select On
			SlowIRRXLowActive(iobase, OFF);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, ON);
		} else {	// fir
			if (IsFIROn(iobase)) {	//fir
				// Mode select On
				SlowIRRXLowActive(iobase, OFF);
				// TX On
				WriteTX(iobase, ON);
				udelay(20);
				// Mode select OFF
				SlowIRRXLowActive(iobase, ON);
				udelay(20);
				// TX Off
				WriteTX(iobase, OFF);
			}
		}
		break;

	case 0x0d:
		UseOneRX(iobase, OFF);	// use two RX pin   RX1,RX2
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);
		SlowIRRXLowActive(iobase, OFF);
		/* Both branches program the same settings today. */
		if (IsSIROn(iobase)) {	//sir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//sir to rx2
		} else {	// fir mir
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	//fir to rx
		}
		break;

	case 0x11:		/* Temic TFDS4500 */

		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

		UseOneRX(iobase, ON);	//use ONE RX....RX1
		InvertTX(iobase, OFF);
		InvertRX(iobase, ON);	// invert RX pin
	
		EnRX2(iobase, ON);	//sir to rx2
		EnGPIOtoRX2(iobase, OFF);

		if( IsSIROn(iobase) ){	//sir

			// Mode select On
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			// Mode select Off
			SlowIRRXLowActive(iobase, OFF);

		} else{
			/* This dongle is single-ended SIR only. */
			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
		}
		break;

	case 0x0ff:		/* Vishay */
		if (IsSIROn(iobase))
			mode = 0;
		else if (IsMIROn(iobase))
			mode = 1;
		else if (IsFIROn(iobase))
			mode = 2;
		else if (IsVFIROn(iobase))
			mode = 5;	//VFIR-16
		SI_SetMode(iobase, mode);
		break;

	default:
		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
	}
}
 669
/*
 * Function via_ircc_change_speed (self, speed)
 *
 *    Change the speed of the device: select SIR/MIR/FIR/VFIR mode,
 *    program the baud divisor, inform the dongle, and swap the netdev
 *    ops between the SIR and FIR transmit paths.
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	/* Disable IR while reprogramming. */
	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode selection */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* Baud divisor relative to the 115200 base rate. */
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * and 576000 is MIR, not SIR. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate to 0x19[2..7]: keep the low 2 bits of I_CF_H_1 */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	// EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	if (speed > 115200) {
		/* Install FIR xmit handler and arm DMA reception. */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
 768
/*
 * Function via_ircc_hard_xmit_sir (skb, dev)
 *
 *    Transmit one frame in SIR mode: async-wrap the skb into the DMA
 *    tx buffer and kick off a DMA transfer at the *current* speed.  A
 *    pending speed change (non-empty skb) is recorded in new_speed and
 *    applied after transmit completes; an empty skb triggers the speed
 *    change immediately.  Always consumes the skb (NETDEV_TX_OK).
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	/* Re-init the chip into SIR mode before every frame. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	/* Async-wrap (byte-stuff) the frame into the DMA buffer. */
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
 846
/*
 * Function via_ircc_hard_xmit_fir (skb, dev)
 *
 *    Transmit one frame in MIR/FIR mode: copy the skb into the next
 *    tx_fifo slot and start the DMA transfer.  Speed changes are
 *    handled the same way as in the SIR path.
 */
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	u16 iobase;
	__u32 speed;
	unsigned long flags;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	/* Receive status FIFO still draining: refuse for now.
	 * NOTE(review): returning NETDEV_TX_OK here without freeing skb
	 * looks like a leak -- confirm against netdev return-code rules. */
	if (self->st_fifo.len)
		return NETDEV_TX_OK;
	/* Settle delay; the 3076 variant uses port-0x80 reads instead
	 * of udelay() -- reason unclear from here, TODO confirm. */
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);
	netif_stop_queue(dev);
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {
			/* Empty frame: change speed now and consume skb. */
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	spin_lock_irqsave(&self->lock, flags);
	/* Queue the frame in the next free tx_fifo slot. */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
	via_ircc_dma_xmit(self, iobase);
//F01   }
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;

}
 895
/*
 * Function via_ircc_dma_xmit (self, iobase)
 *
 *    Start a DMA transmit of the tx_fifo entry at tx_fifo.ptr: switch
 *    the chip to transmit, reset state machines, program the DMA
 *    channel with the entry's offset inside the coherent tx buffer and
 *    kick off the transfer.  Always returns 0.
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* Bus address = entry offset within tx_buff + DMA base. */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
 926
/*
 * Function via_ircc_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. This function will only be
 *    called by the interrupt handler.  Updates tx stats, applies any
 *    pending speed change, resets the tx_fifo bookkeeping and wakes
 *    the netdev queue.  Returns TRUE (back-to-back transmission is
 *    disabled -- see the F01 comments).
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//      DisableDmaChannel(self->io.dma);
	/* Check for underrun: bit 3 of the TX status register. */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
// how to clear underrun ?
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back? 
	if (self->tx_fifo.len) {
		// Not finished yet! 
	  	via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else { 
F01_E*/
	// Reset Tx FIFO info 
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	// Make sure we have room for more frames 
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	// Not busy transmitting anymore 
	// Tell the network layer, that we can accept more frames 
	netif_wake_queue(self->netdev);
//F01   }
	return ret;
}
 996
/*
 * Function via_ircc_dma_receive (self)
 *
 *    Set configuration for receiving a frame: reset tx/rx bookkeeping,
 *    switch the chip to receive, program the rx DMA channel with the
 *    whole coherent rx buffer and start reception.  Always returns 0.
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Reset tx queue and rx state before switching direction. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	/* Note: rx uses the second DMA channel (dma2). */
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		  self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
1039
1040/*
1041 * Function via_ircc_dma_receive_complete (self)
1042 *
1043 *    Controller Finished with receiving frames,
1044 *    and this routine is call by ISR
1045 *    
1046 */
1047static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1048					 int iobase)
1049{
1050	struct st_fifo *st_fifo;
1051	struct sk_buff *skb;
1052	int len, i;
1053	u8 status = 0;
1054
1055	iobase = self->io.fir_base;
1056	st_fifo = &self->st_fifo;
1057
1058	if (self->io.speed < 4000000) {	//Speed below FIR
1059		len = GetRecvByte(iobase, self);
1060		skb = dev_alloc_skb(len + 1);
1061		if (skb == NULL)
1062			return FALSE;
1063		// Make sure IP header gets aligned 
1064		skb_reserve(skb, 1);
1065		skb_put(skb, len - 2);
1066		if (self->chip_id == 0x3076) {
1067			for (i = 0; i < len - 2; i++)
1068				skb->data[i] = self->rx_buff.data[i * 2];
1069		} else {
1070			if (self->chip_id == 0x3096) {
1071				for (i = 0; i < len - 2; i++)
1072					skb->data[i] =
1073					    self->rx_buff.data[i];
1074			}
1075		}
1076		// Move to next frame 
1077		self->rx_buff.data += len;
1078		self->netdev->stats.rx_bytes += len;
1079		self->netdev->stats.rx_packets++;
1080		skb->dev = self->netdev;
1081		skb_reset_mac_header(skb);
1082		skb->protocol = htons(ETH_P_IRDA);
1083		netif_rx(skb);
1084		return TRUE;
1085	}
1086
1087	else {			//FIR mode
1088		len = GetRecvByte(iobase, self);
1089		if (len == 0)
1090			return TRUE;	//interrupt only, data maybe move by RxT  
1091		if (((len - 4) < 2) || ((len - 4) > 2048)) {
1092			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
1093				   __func__, len, RxCurCount(iobase, self),
1094				   self->RxLastCount);
1095			hwreset(self);
1096			return FALSE;
1097		}
1098		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1099			   __func__,
1100			   st_fifo->len, len - 4, RxCurCount(iobase, self));
1101
1102		st_fifo->entries[st_fifo->tail].status = status;
1103		st_fifo->entries[st_fifo->tail].len = len;
1104		st_fifo->pending_bytes += len;
1105		st_fifo->tail++;
1106		st_fifo->len++;
1107		if (st_fifo->tail > MAX_RX_WINDOW)
1108			st_fifo->tail = 0;
1109		self->RxDataReady = 0;
1110
1111		// It maybe have MAX_RX_WINDOW package receive by
1112		// receive_complete before Timer IRQ
1113/* F01_S
1114          if (st_fifo->len < (MAX_RX_WINDOW+2 )) { 
1115		  RXStart(iobase,ON);
1116	  	  SetTimer(iobase,4);
1117	  }
1118	  else	  { 
1119F01_E */
1120		EnableRX(iobase, OFF);
1121		EnRXDMA(iobase, OFF);
1122		RXStart(iobase, OFF);
1123//F01_S
1124		// Put this entry back in fifo 
1125		if (st_fifo->head > MAX_RX_WINDOW)
1126			st_fifo->head = 0;
1127		status = st_fifo->entries[st_fifo->head].status;
1128		len = st_fifo->entries[st_fifo->head].len;
1129		st_fifo->head++;
1130		st_fifo->len--;
1131
1132		skb = dev_alloc_skb(len + 1 - 4);
1133		/*
1134		 * if frame size, data ptr, or skb ptr are wrong, then get next
1135		 * entry.
1136		 */
1137		if ((skb == NULL) || (skb->data == NULL) ||
1138		    (self->rx_buff.data == NULL) || (len < 6)) {
1139			self->netdev->stats.rx_dropped++;
1140			kfree_skb(skb);
1141			return TRUE;
1142		}
1143		skb_reserve(skb, 1);
1144		skb_put(skb, len - 4);
1145
1146		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1147		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
1148			   len - 4, self->rx_buff.data);
1149
1150		// Move to next frame 
1151		self->rx_buff.data += len;
1152		self->netdev->stats.rx_bytes += len;
1153		self->netdev->stats.rx_packets++;
1154		skb->dev = self->netdev;
1155		skb_reset_mac_header(skb);
1156		skb->protocol = htons(ETH_P_IRDA);
1157		netif_rx(skb);
1158
1159//F01_E
1160	}			//FIR
1161	return TRUE;
1162
1163}
1164
1165/*
1166 * if frame is received , but no INT ,then use this routine to upload frame.
1167 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;
	st_fifo = &self->st_fifo;

	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	/* Too short to hold any payload after the 4-byte FCS: drop it. */
	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	/* Make sure IP header gets aligned. */
	skb_reserve(skb, 1);
	/*
	 * NOTE(review): copies len - 4 + 1 bytes, one more than a plain FCS
	 * strip -- looks deliberate but is unexplained; confirm against the
	 * chip's byte-count semantics.
	 */
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	/* Account this frame in the status FIFO ring (wraps at the window). */
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;
	// Move to next frame 
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	/* Keep receiving while the status FIFO still has room. */
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		RXStart(iobase, ON);
	} else {
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}
1213
1214/*
1215 * Implement back to back receive , use this routine to upload data.
1216 */
1217
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		// if still receiving ,then return ,don't upload frame 
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	/*
	 * Flush queued frames when the link has gone idle, the DMA buffer
	 * cannot hold another max-size (2048-byte) frame, or the status
	 * FIFO window is full.
	 */
	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {	//upload frame
			// Put this entry back in fifo 
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * if frame size, data ptr, or skb ptr are wrong,
			 * then get next entry.
			 */
			if ((skb == NULL) || (skb->data == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			/* Align the IP header; strip the 4-byte FCS. */
			skb_reserve(skb, 1);
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			// Move to next frame 
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}		//while
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__,
			   GetHostStatus(iobase), GetRXStatus(iobase));

		/*
		 * if frame is receive complete at this routine ,then upload
		 * frame.
		 */
		if ((GetRXStatus(iobase) & 0x10) &&
		    (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	}			// timer detect complete
	else
		SetTimer(iobase, 4);
	return TRUE;

}
1297
1298
1299
1300/*
1301 * Function via_ircc_interrupt (irq, dev_id)
1302 *
1303 *    An interrupt from the chip has arrived. Time to do some work
1304 *
1305 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	/* Serialize against the xmit/ioctl paths; we run in hard-IRQ context. */
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x:  %s %s %s  %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	//Timer Event
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready hold too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	// call this to upload frame.
				RxTimerHandler(self, iobase);
			}
		}		//RECV
	}			//Timer Event
	if ((iHostIntType & 0x20) != 0) {	//Tx Event
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x:  %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		/* End-of-message: frame fully sent, turn the link around. */
		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	// read and will auto clean
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		//EOP
	}			//Tx Event
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	//Rx Event
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err."	: "",
			   (iRxIntType & 0x40) ? "CRC err"	: "",
			   (iRxIntType & 0x20) ? "FIFO overr."	: "",
			   (iRxIntType & 0x10) ? "EOF"		: "",
			   (iRxIntType & 0x08) ? "RxData"	: "",
			   (iRxIntType & 0x02) ? "RxMaxLen"	: "",
			   (iRxIntType & 0x01) ? "SIR bad"	: "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		/* EOF: a whole frame arrived; deliver it, then re-arm RX. */
		if (iRxIntType & 0x10) {
			if (via_ircc_dma_receive_complete(self, iobase)) {
//F01       if(!(IsFIROn(iobase)))  via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		// No ERR     
		else {		//ERR
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	//PHY,CRC ERR

				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			via_ircc_dma_receive(self);
		}		//ERR

	}			//Rx Event
	spin_unlock(&self->lock);
	/* Non-zero host status means this interrupt was ours. */
	return IRQ_RETVAL(iHostIntType);
}
1410
/*
 * Full hardware reset: stop all TX/RX/DMA activity, re-run the chip init
 * sequence at SIR/9600, then restore the previously configured speed.
 * Called from the ISR and error paths when the chip state is suspect.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Quiesce everything before reprogramming. */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	/* Re-run the base init sequence in SIR mode at 9600 baud. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* gate the state machine off */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);	/* ...and back on */

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}
1443
1444/*
1445 * Function via_ircc_is_receiving (self)
1446 *
1447 *    Return TRUE is we are currently receiving a frame
1448 *
1449 */
1450static int via_ircc_is_receiving(struct via_ircc_cb *self)
1451{
1452	int status = FALSE;
1453	int iobase;
1454
1455	IRDA_ASSERT(self != NULL, return FALSE;);
1456
1457	iobase = self->io.fir_base;
1458	if (CkRxRecv(iobase, self))
1459		status = TRUE;
1460
1461	IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1462
1463	return status;
1464}
1465
1466
1467/*
1468 * Function via_ircc_net_open (dev)
1469 *
1470 *    Start the device
1471 *
1472 */
1473static int via_ircc_net_open(struct net_device *dev)
1474{
1475	struct via_ircc_cb *self;
1476	int iobase;
1477	char hwname[32];
1478
1479	IRDA_DEBUG(3, "%s()\n", __func__);
1480
1481	IRDA_ASSERT(dev != NULL, return -1;);
1482	self = netdev_priv(dev);
1483	dev->stats.rx_packets = 0;
1484	IRDA_ASSERT(self != NULL, return 0;);
1485	iobase = self->io.fir_base;
1486	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1487		IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1488			     self->io.irq);
1489		return -EAGAIN;
1490	}
1491	/*
1492	 * Always allocate the DMA channel after the IRQ, and clean up on 
1493	 * failure.
1494	 */
1495	if (request_dma(self->io.dma, dev->name)) {
1496		IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1497			     self->io.dma);
1498		free_irq(self->io.irq, self);
1499		return -EAGAIN;
1500	}
1501	if (self->io.dma2 != self->io.dma) {
1502		if (request_dma(self->io.dma2, dev->name)) {
1503			IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1504				     driver_name, self->io.dma2);
1505			free_irq(self->io.irq, self);
1506			free_dma(self->io.dma);
1507			return -EAGAIN;
1508		}
1509	}
1510
1511
1512	/* turn on interrupts */
1513	EnAllInt(iobase, ON);
1514	EnInternalLoop(iobase, OFF);
1515	EnExternalLoop(iobase, OFF);
1516
1517	/* */
1518	via_ircc_dma_receive(self);
1519
1520	/* Ready to play! */
1521	netif_start_queue(dev);
1522
1523	/* 
1524	 * Open new IrLAP layer instance, now that everything should be
1525	 * initialized properly 
1526	 */
1527	sprintf(hwname, "VIA @ 0x%x", iobase);
1528	self->irlap = irlap_open(dev, &self->qos, hwname);
1529
1530	self->RxLastCount = 0;
1531
1532	return 0;
1533}
1534
1535/*
1536 * Function via_ircc_net_close (dev)
1537 *
1538 *    Stop the device
1539 *
1540 */
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	iobase = self->io.fir_base;
	/* Quiesce DMA before releasing the channels below. */
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	DisableDmaChannel(self->io.dma);

	/* Disable interrupts */
	EnAllInt(iobase, OFF);
	/* dev is the dev_id cookie that net_open registered. */
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
1572
1573/*
1574 * Function via_ircc_net_ioctl (dev, rq, cmd)
1575 *
1576 *    Process IOCTL commands for this device
1577 *
1578 */
1579static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1580			      int cmd)
1581{
1582	struct if_irda_req *irq = (struct if_irda_req *) rq;
1583	struct via_ircc_cb *self;
1584	unsigned long flags;
1585	int ret = 0;
1586
1587	IRDA_ASSERT(dev != NULL, return -1;);
1588	self = netdev_priv(dev);
1589	IRDA_ASSERT(self != NULL, return -1;);
1590	IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1591		   cmd);
1592	/* Disable interrupts & save flags */
1593	spin_lock_irqsave(&self->lock, flags);
1594	switch (cmd) {
1595	case SIOCSBANDWIDTH:	/* Set bandwidth */
1596		if (!capable(CAP_NET_ADMIN)) {
1597			ret = -EPERM;
1598			goto out;
1599		}
1600		via_ircc_change_speed(self, irq->ifr_baudrate);
1601		break;
1602	case SIOCSMEDIABUSY:	/* Set media busy */
1603		if (!capable(CAP_NET_ADMIN)) {
1604			ret = -EPERM;
1605			goto out;
1606		}
1607		irda_device_set_media_busy(self->netdev, TRUE);
1608		break;
1609	case SIOCGRECEIVING:	/* Check if we are receiving right now */
1610		irq->ifr_receiving = via_ircc_is_receiving(self);
1611		break;
1612	default:
1613		ret = -EOPNOTSUPP;
1614	}
1615      out:
1616	spin_unlock_irqrestore(&self->lock, flags);
1617	return ret;
1618}
1619
1620MODULE_AUTHOR("VIA Technologies,inc");
1621MODULE_DESCRIPTION("VIA IrDA Device Driver");
1622MODULE_LICENSE("GPL");
1623
1624module_init(via_ircc_init);
1625module_exit(via_ircc_cleanup);
v4.6
   1/********************************************************************
   2 Filename:      via-ircc.c
   3 Version:       1.0 
   4 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
   5 Author:        VIA Technologies,inc
   6 Date  :	08/06/2003
   7
   8Copyright (c) 1998-2003 VIA Technologies, Inc.
   9
  10This program is free software; you can redistribute it and/or modify it under
  11the terms of the GNU General Public License as published by the Free Software
  12Foundation; either version 2, or (at your option) any later version.
  13
  14This program is distributed in the hope that it will be useful, but WITHOUT
  15ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
  16MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  17See the GNU General Public License for more details.
  18
  19You should have received a copy of the GNU General Public License along with
  20this program; if not, see <http://www.gnu.org/licenses/>.
 
  21
  22F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
  23F02 Oct/28/02: Add SB device ID for 3147 and 3177.
  24 Comment :
  25       jul/09/2002 : only implement two kind of dongle currently.
  26       Oct/02/2002 : work on VT8231 and VT8233 .
  27       Aug/06/2003 : change driver format to pci driver .
  28
  292004-02-16: <sda@bdit.de>
  30- Removed unneeded 'legacy' pci stuff.
  31- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
  32- On speed change from core, don't send SIR frame with new speed. 
  33  Use current speed and change speeds later.
  34- Make module-param dongle_id actually work.
  35- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only. 
  36  Tested with home-grown PCB on EPIA boards.
  37- Code cleanup.
  38       
  39 ********************************************************************/
  40#include <linux/module.h>
  41#include <linux/kernel.h>
  42#include <linux/types.h>
  43#include <linux/skbuff.h>
  44#include <linux/netdevice.h>
  45#include <linux/ioport.h>
  46#include <linux/delay.h>
  47#include <linux/init.h>
  48#include <linux/interrupt.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/pci.h>
  51#include <linux/dma-mapping.h>
  52#include <linux/gfp.h>
  53
  54#include <asm/io.h>
  55#include <asm/dma.h>
  56#include <asm/byteorder.h>
  57
  58#include <linux/pm.h>
  59
  60#include <net/irda/wrapper.h>
  61#include <net/irda/irda.h>
  62#include <net/irda/irda_device.h>
  63
  64#include "via-ircc.h"
  65
  66#define VIA_MODULE_NAME "via-ircc"
  67#define CHIP_IO_EXTENT 0x40
  68
  69static char *driver_name = VIA_MODULE_NAME;
  70
  71/* Module parameters */
  72static int qos_mtt_bits = 0x07;	/* 1 ms or more */
  73static int dongle_id = 0;	/* default: probe */
  74
  75/* We can't guess the type of connected dongle, user *must* supply it. */
  76module_param(dongle_id, int, 0);
  77
  78/* Some prototypes */
  79static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
  80			 unsigned int id);
  81static int via_ircc_dma_receive(struct via_ircc_cb *self);
  82static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
  83					 int iobase);
  84static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
  85						struct net_device *dev);
  86static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
  87						struct net_device *dev);
  88static void via_hw_init(struct via_ircc_cb *self);
  89static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
  90static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
  91static int via_ircc_is_receiving(struct via_ircc_cb *self);
  92static int via_ircc_read_dongle_id(int iobase);
  93
  94static int via_ircc_net_open(struct net_device *dev);
  95static int via_ircc_net_close(struct net_device *dev);
  96static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
  97			      int cmd);
  98static void via_ircc_change_dongle_speed(int iobase, int speed,
  99					 int dongle_id);
 100static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
 101static void hwreset(struct via_ircc_cb *self);
 102static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
 103static int upload_rxdata(struct via_ircc_cb *self, int iobase);
 104static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
 105static void via_remove_one(struct pci_dev *pdev);
 106
 107/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
 108static void iodelay(int udelay)
 109{
 110	u8 data;
 111	int i;
 112
 113	for (i = 0; i < udelay; i++) {
 114		data = inb(0x80);
 115	}
 116}
 117
/*
 * PCI IDs of the VIA south bridges whose ISA/LPC function carries the
 * IrDA port.  The last field (driver_data) is a per-entry index; the
 * actual chip type is re-probed in via_init_one().
 */
static const struct pci_device_id via_pci_tbl[] = {
	{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
	{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
	{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
	{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
	{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci,via_pci_tbl);
 128
 129
/* PCI driver glue: binds via_init_one/via_remove_one to the IDs above. */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= via_remove_one,
};
 136
 137
 138/*
 139 * Function via_ircc_init ()
 140 *
 141 *    Initialize chip. Just find out chip type and resource.
 142 */
 143static int __init via_ircc_init(void)
 144{
 145	int rc;
 146
 
 
 147	rc = pci_register_driver(&via_driver);
 148	if (rc < 0) {
 149		pr_debug("%s(): error rc = %d, returning  -ENODEV...\n",
 150			 __func__, rc);
 151		return -ENODEV;
 152	}
 153	return 0;
 154}
 155
 156static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
 157{
 158	int rc;
 159        u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
 160	u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
 161	chipio_t info;
 162
 163	pr_debug("%s(): Device ID=(0X%X)\n", __func__, id->device);
 164
 165	rc = pci_enable_device (pcidev);
 166	if (rc) {
 167		pr_debug("%s(): error rc = %d\n", __func__, rc);
 168		return -ENODEV;
 169	}
 170
 171	// South Bridge exist
 172        if ( ReadLPCReg(0x20) != 0x3C )
 173		Chipset=0x3096;
 174	else
 175		Chipset=0x3076;
 176
 177	if (Chipset==0x3076) {
 178		pr_debug("%s(): Chipset = 3076\n", __func__);
 179
 180		WriteLPCReg(7,0x0c );
 181		temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
 182		if((temp&0x01)==1) {   // BIOS close or no FIR
 183			WriteLPCReg(0x1d, 0x82 );
 184			WriteLPCReg(0x23,0x18);
 185			temp=ReadLPCReg(0xF0);
 186			if((temp&0x01)==0) {
 187				temp=(ReadLPCReg(0x74)&0x03);    //DMA
 188				FirDRQ0=temp + 4;
 189				temp=(ReadLPCReg(0x74)&0x0C) >> 2;
 190				FirDRQ1=temp + 4;
 191			} else {
 192				temp=(ReadLPCReg(0x74)&0x0C) >> 2;    //DMA
 193				FirDRQ0=temp + 4;
 194				FirDRQ1=FirDRQ0;
 195			}
 196			FirIRQ=(ReadLPCReg(0x70)&0x0f);		//IRQ
 197			FirIOBase=ReadLPCReg(0x60 ) << 8;	//IO Space :high byte
 198			FirIOBase=FirIOBase| ReadLPCReg(0x61) ;	//low byte
 199			FirIOBase=FirIOBase  ;
 200			info.fir_base=FirIOBase;
 201			info.irq=FirIRQ;
 202			info.dma=FirDRQ1;
 203			info.dma2=FirDRQ0;
 204			pci_read_config_byte(pcidev,0x40,&bTmp);
 205			pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
 206			pci_read_config_byte(pcidev,0x42,&bTmp);
 207			pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
 208			pci_write_config_byte(pcidev,0x5a,0xc0);
 209			WriteLPCReg(0x28, 0x70 );
 210			rc = via_ircc_open(pcidev, &info, 0x3076);
 
 211		} else
 212			rc = -ENODEV; //IR not turn on	 
 213	} else { //Not VT1211
 214		pr_debug("%s(): Chipset = 3096\n", __func__);
 215
 216		pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
 217		if((bTmp&0x01)==1) {  // BIOS enable FIR
 218			//Enable Double DMA clock
 219			pci_read_config_byte(pcidev,0x42,&oldPCI_40);
 220			pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
 221			pci_read_config_byte(pcidev,0x40,&oldPCI_40);
 222			pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
 223			pci_read_config_byte(pcidev,0x44,&oldPCI_44);
 224			pci_write_config_byte(pcidev,0x44,0x4e);
 225  //---------- read configuration from Function0 of south bridge
 226			if((bTmp&0x02)==0) {
 227				pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
 228				FirDRQ0 = (bTmp1 & 0x30) >> 4;
 229				pci_read_config_byte(pcidev,0x44,&bTmp1);
 230				FirDRQ1 = (bTmp1 & 0xc0) >> 6;
 231			} else  {
 232				pci_read_config_byte(pcidev,0x44,&bTmp1);    //DMA
 233				FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
 234				FirDRQ1=0;
 235			}
 236			pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
 237			FirIRQ = bTmp1 & 0x0f;
 238
 239			pci_read_config_byte(pcidev,0x69,&bTmp);
 240			FirIOBase = bTmp << 8;//hight byte
 241			pci_read_config_byte(pcidev,0x68,&bTmp);
 242			FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
 243  //-------------------------
 244			info.fir_base=FirIOBase;
 245			info.irq=FirIRQ;
 246			info.dma=FirDRQ1;
 247			info.dma2=FirDRQ0;
 248			rc = via_ircc_open(pcidev, &info, 0x3096);
 
 249		} else
 250			rc = -ENODEV; //IR not turn on !!!!!
 251	}//Not VT1211
 252
 253	pr_debug("%s(): End - rc = %d\n", __func__, rc);
 254	return rc;
 255}
 256
/* Module exit: unregistering the PCI driver tears down every instance. */
static void __exit via_ircc_cleanup(void)
{
	/* Cleanup all instances of the driver */
	pci_unregister_driver (&via_driver); 
}
 262
/*
 * Two netdev ops tables that differ only in the start_xmit handler; the
 * driver switches between them when changing between SIR and FIR mode.
 */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
 275
 276/*
 277 * Function via_ircc_open(pdev, iobase, irq)
 278 *
 279 *    Open driver instance
 280 *
 281 */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL) 
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	pci_set_drvdata(pdev, self);

	/* Initialize Resource */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		pr_debug("%s(), can't get iobase of 0x%03x\n",
			 __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}
	
	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Check if user has supplied the dongle id or not.
	 * NOTE(review): this writes the module-wide dongle_id from a
	 * per-device probe; with multiple adapters the second inherits
	 * the first's value. */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* The only value we must override it the baudrate */
	/* Maximum speeds and capabilities are dongle-dependent. */
	switch( self->io.dongle_id ){
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		/* Unknown dongles are limited to SIR speeds. */
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	/* Following was used for testing:
	 *
	 *   self->qos.baud_rate.bits = IR_9600;
	 *
	 * Is is no good, as it prohibits (error-prone) speed-changes.
	 */

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate zeroed, DMA-coherent RX and TX buffers. */
	self->rx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}

	self->tx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	net_info_ratelimited("IrDA: Registered device %s (via-ircc)\n",
			     dev->name);

	/* Initialise the hardware..
	*/
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	free_netdev(dev);
	return err;
}
 408
 409/*
 410 * Function via_remove_one(pdev)
 411 *
 412 *    Close driver instance
 413 *
 414 */
static void via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);	//hardware reset.
	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	pr_debug("%s(), Releasing Region %03x\n",
		 __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	/* Free the DMA-coherent buffers allocated in via_ircc_open(). */
	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	pci_disable_device(pdev);
}
 441
 442/*
 443 * Function via_hw_init(self)
 444 *
 445 *    Returns non-negative on success.
 446 *
 447 * Formerly via_ircc_setup 
 448 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	SetMaxRxPacketSize(iobase, 0x0fff);	//set to max:4095
	// FIFO Init
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	// for VT1211
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);	/* start out in SIR mode at 9600 baud */
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* gate state machine off */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* state machine back on */
}
 497
 498/*
 499 * Function via_ircc_read_dongle_id (void)
 500 *
 501 */
static int via_ircc_read_dongle_id(int iobase)
{
	/* Hardware probing of the dongle type is not implemented; warn and
	 * fall back to a fixed id.  Users can override with the dongle_id
	 * module parameter.  9 (0x09) selects the IBM31T1100/TFDS6000 entry
	 * in via_ircc_change_dongle_speed(). */
	net_err_ratelimited("via-ircc: dongle probing not supported, please specify dongle_id module parameter\n");
	return 9;	/* Default to IBM */
}
 507
 508/*
 509 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *    Change the speed of the attached dongle.
 *    Only a handful of dongle types are currently implemented.
 512 */
 513static void via_ircc_change_dongle_speed(int iobase, int speed,
 514					 int dongle_id)
 515{
 516	u8 mode = 0;
 517
 518	/* speed is unused, as we use IsSIROn()/IsMIROn() */
 519	speed = speed;
 520
 521	pr_debug("%s(): change_dongle_speed to %d for 0x%x, %d\n",
 522		 __func__, speed, iobase, dongle_id);
 523
 524	switch (dongle_id) {
 525
 526		/* Note: The dongle_id's listed here are derived from
 527		 * nsc-ircc.c */ 
 528
 529	case 0x08:		/* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
 530		UseOneRX(iobase, ON);	// use one RX pin   RX1,RX2
 531		InvertTX(iobase, OFF);
 532		InvertRX(iobase, OFF);
 533
 534		EnRX2(iobase, ON);	//sir to rx2
 535		EnGPIOtoRX2(iobase, OFF);
 536
 537		if (IsSIROn(iobase)) {	//sir
 538			// Mode select Off
 539			SlowIRRXLowActive(iobase, ON);
 540			udelay(1000);
 541			SlowIRRXLowActive(iobase, OFF);
 542		} else {
 543			if (IsMIROn(iobase)) {	//mir
 544				// Mode select On
 545				SlowIRRXLowActive(iobase, OFF);
 546				udelay(20);
 547			} else {	// fir
 548				if (IsFIROn(iobase)) {	//fir
 549					// Mode select On
 550					SlowIRRXLowActive(iobase, OFF);
 551					udelay(20);
 552				}
 553			}
 554		}
 555		break;
 556
 557	case 0x09:		/* IBM31T1100 or Temic TFDS6000/TFDS6500 */
 558		UseOneRX(iobase, ON);	//use ONE RX....RX1
 559		InvertTX(iobase, OFF);
 560		InvertRX(iobase, OFF);	// invert RX pin
 561
 562		EnRX2(iobase, ON);
 563		EnGPIOtoRX2(iobase, OFF);
 564		if (IsSIROn(iobase)) {	//sir
 565			// Mode select On
 566			SlowIRRXLowActive(iobase, ON);
 567			udelay(20);
 568			// Mode select Off
 569			SlowIRRXLowActive(iobase, OFF);
 570		}
 571		if (IsMIROn(iobase)) {	//mir
 572			// Mode select On
 573			SlowIRRXLowActive(iobase, OFF);
 574			udelay(20);
 575			// Mode select Off
 576			SlowIRRXLowActive(iobase, ON);
 577		} else {	// fir
 578			if (IsFIROn(iobase)) {	//fir
 579				// Mode select On
 580				SlowIRRXLowActive(iobase, OFF);
 581				// TX On
 582				WriteTX(iobase, ON);
 583				udelay(20);
 584				// Mode select OFF
 585				SlowIRRXLowActive(iobase, ON);
 586				udelay(20);
 587				// TX Off
 588				WriteTX(iobase, OFF);
 589			}
 590		}
 591		break;
 592
 593	case 0x0d:
 594		UseOneRX(iobase, OFF);	// use two RX pin   RX1,RX2
 595		InvertTX(iobase, OFF);
 596		InvertRX(iobase, OFF);
 597		SlowIRRXLowActive(iobase, OFF);
 598		if (IsSIROn(iobase)) {	//sir
 599			EnGPIOtoRX2(iobase, OFF);
 600			WriteGIO(iobase, OFF);
 601			EnRX2(iobase, OFF);	//sir to rx2
 602		} else {	// fir mir
 603			EnGPIOtoRX2(iobase, OFF);
 604			WriteGIO(iobase, OFF);
 605			EnRX2(iobase, OFF);	//fir to rx
 606		}
 607		break;
 608
 609	case 0x11:		/* Temic TFDS4500 */
 610
 611		pr_debug("%s: Temic TFDS4500: One RX pin, TX normal, RX inverted\n",
 612			 __func__);
 613
 614		UseOneRX(iobase, ON);	//use ONE RX....RX1
 615		InvertTX(iobase, OFF);
 616		InvertRX(iobase, ON);	// invert RX pin
 617	
 618		EnRX2(iobase, ON);	//sir to rx2
 619		EnGPIOtoRX2(iobase, OFF);
 620
 621		if( IsSIROn(iobase) ){	//sir
 622
 623			// Mode select On
 624			SlowIRRXLowActive(iobase, ON);
 625			udelay(20);
 626			// Mode select Off
 627			SlowIRRXLowActive(iobase, OFF);
 628
 629		} else{
 630			pr_debug("%s: Warning: TFDS4500 not running in SIR mode !\n",
 631				 __func__);
 632		}
 633		break;
 634
 635	case 0x0ff:		/* Vishay */
 636		if (IsSIROn(iobase))
 637			mode = 0;
 638		else if (IsMIROn(iobase))
 639			mode = 1;
 640		else if (IsFIROn(iobase))
 641			mode = 2;
 642		else if (IsVFIROn(iobase))
 643			mode = 5;	//VFIR-16
 644		SI_SetMode(iobase, mode);
 645		break;
 646
 647	default:
 648		net_err_ratelimited("%s: Error: dongle_id %d unsupported !\n",
 649				    __func__, dongle_id);
 650	}
 651}
 652
 653/*
 654 * Function via_ircc_change_speed (self, baud)
 655 *
 656 *    Change the speed of the device
 657 *
 658 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	pr_debug("%s: change_speed to %d bps.\n", __func__, speed);

	/* Disable IR while reprogramming the controller */
	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode selection: choose SIR/MIR/FIR/VFIR and compute
	 * the SIR baud-rate divisor ("value", written to I_CF_H_1 below) */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * and 576000 is MIR, not SIR. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);	/* FIR uses CRC32; 16-bit CRC off */
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate divisor into bits [2..7] of reg 0x19 (I_CF_H_1),
	 * preserving the low two bits */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	// EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	/* SIR framing filter only applies below MIR speeds */
	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	/* Swap netdev_ops so xmit goes through the matching SIR/FIR path */
	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
 751
 752/*
 753 * Function via_ircc_hard_xmit (skb, dev)
 754 *
 755 *    Transmit the frame!
 756 *
 757 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed.  A speed change may ride on
	 * an empty frame (switch immediately); otherwise remember it and
	 * switch after this frame is sent at the old speed. */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	/* Re-init the chip into SIR mode for this transmission */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* IR off while programming */

	spin_lock_irqsave(&self->lock, flags);
	/* SIR-wrap the frame (byte stuffing + BOF/EOF) into tx_buff */
	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);	/* IR back on */

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	/* Kick the DMA transfer of the wrapped frame */
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	/* skb contents were copied by async_wrap_skb(); safe to free now */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
 829
 830static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
 831						struct net_device *dev)
 832{
 833	struct via_ircc_cb *self;
 834	u16 iobase;
 835	__u32 speed;
 836	unsigned long flags;
 837
 838	self = netdev_priv(dev);
 839	iobase = self->io.fir_base;
 840
 841	if (self->st_fifo.len)
 842		return NETDEV_TX_OK;
 843	if (self->chip_id == 0x3076)
 844		iodelay(1500);
 845	else
 846		udelay(1500);
 847	netif_stop_queue(dev);
 848	speed = irda_get_next_speed(skb);
 849	if ((speed != self->io.speed) && (speed != -1)) {
 850		if (!skb->len) {
 851			via_ircc_change_speed(self, speed);
 852			dev->trans_start = jiffies;
 853			dev_kfree_skb(skb);
 854			return NETDEV_TX_OK;
 855		} else
 856			self->new_speed = speed;
 857	}
 858	spin_lock_irqsave(&self->lock, flags);
 859	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
 860	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
 861
 862	self->tx_fifo.tail += skb->len;
 863	dev->stats.tx_bytes += skb->len;
 864	skb_copy_from_linear_data(skb,
 865		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
 866	self->tx_fifo.len++;
 867	self->tx_fifo.free++;
 868//F01   if (self->tx_fifo.len == 1) {
 869	via_ircc_dma_xmit(self, iobase);
 870//F01   }
 871//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
 872	dev->trans_start = jiffies;
 873	dev_kfree_skb(skb);
 874	spin_unlock_irqrestore(&self->lock, flags);
 875	return NETDEV_TX_OK;
 876
 877}
 878
/* Program the chip and DMA engine to transmit the frame currently at
 * tx_fifo.queue[tx_fifo.ptr].  Caller holds self->lock.  Always returns 0. */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* DMA address = bus address of tx_buff plus the queued frame's
	 * offset within the buffer */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	pr_debug("%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		 __func__, self->tx_fifo.ptr,
		 self->tx_fifo.queue[self->tx_fifo.ptr].len,
		 self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
 909
 910/*
 911 * Function via_ircc_dma_xmit_complete (self)
 912 *
 *    The transfer of a frame is finished. This function will only be called 
 914 *    by the interrupt handler
 915 *
 916 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	u8 Tx_status;

	iobase = self->io.fir_base;
	/* Disable DMA */
//      DisableDmaChannel(self->io.dma);
	/* Check for underrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		/* TX FIFO underrun: count the error and reset the chip */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
	/* how to clear underrun? */
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed (deferred from hard_xmit) */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	pr_debug("%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		 __func__,
		 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back? 
	if (self->tx_fifo.len) {
		// Not finished yet! 
	  	via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else { 
F01_E*/
	/* Reset Tx FIFO info (back-to-back TX is disabled - see F01 notes) */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	/* Make sure we have room for more frames */
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	/* Not busy transmitting anymore:
	 * tell the network layer that we can accept more frames */
	netif_wake_queue(self->netdev);
//F01   }
	return TRUE;
}
 975
 976/*
 977 * Function via_ircc_dma_receive (self)
 978 *
 979 *    Set configuration for receive a frame.
 980 *
 981 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	/* Reset all software TX/RX bookkeeping, then arm the chip and DMA
	 * engine for reception into rx_buff.  Always returns 0. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	/* RX uses the second DMA channel (dma2) and the whole rx buffer */
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		  self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
1016
1017/*
1018 * Function via_ircc_dma_receive_complete (self)
1019 *
1020 *    Controller Finished with receiving frames,
 *    and this routine is called by the ISR
1022 *    
1023 */
1024static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1025					 int iobase)
1026{
1027	struct st_fifo *st_fifo;
1028	struct sk_buff *skb;
1029	int len, i;
1030	u8 status = 0;
1031
1032	iobase = self->io.fir_base;
1033	st_fifo = &self->st_fifo;
1034
1035	if (self->io.speed < 4000000) {	//Speed below FIR
1036		len = GetRecvByte(iobase, self);
1037		skb = dev_alloc_skb(len + 1);
1038		if (skb == NULL)
1039			return FALSE;
1040		// Make sure IP header gets aligned 
1041		skb_reserve(skb, 1);
1042		skb_put(skb, len - 2);
1043		if (self->chip_id == 0x3076) {
1044			for (i = 0; i < len - 2; i++)
1045				skb->data[i] = self->rx_buff.data[i * 2];
1046		} else {
1047			if (self->chip_id == 0x3096) {
1048				for (i = 0; i < len - 2; i++)
1049					skb->data[i] =
1050					    self->rx_buff.data[i];
1051			}
1052		}
1053		// Move to next frame 
1054		self->rx_buff.data += len;
1055		self->netdev->stats.rx_bytes += len;
1056		self->netdev->stats.rx_packets++;
1057		skb->dev = self->netdev;
1058		skb_reset_mac_header(skb);
1059		skb->protocol = htons(ETH_P_IRDA);
1060		netif_rx(skb);
1061		return TRUE;
1062	}
1063
1064	else {			//FIR mode
1065		len = GetRecvByte(iobase, self);
1066		if (len == 0)
1067			return TRUE;	//interrupt only, data maybe move by RxT  
1068		if (((len - 4) < 2) || ((len - 4) > 2048)) {
1069			pr_debug("%s(): Trouble:len=%x,CurCount=%x,LastCount=%x\n",
1070				 __func__, len, RxCurCount(iobase, self),
1071				 self->RxLastCount);
1072			hwreset(self);
1073			return FALSE;
1074		}
1075		pr_debug("%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1076			 __func__,
1077			 st_fifo->len, len - 4, RxCurCount(iobase, self));
1078
1079		st_fifo->entries[st_fifo->tail].status = status;
1080		st_fifo->entries[st_fifo->tail].len = len;
1081		st_fifo->pending_bytes += len;
1082		st_fifo->tail++;
1083		st_fifo->len++;
1084		if (st_fifo->tail > MAX_RX_WINDOW)
1085			st_fifo->tail = 0;
1086		self->RxDataReady = 0;
1087
1088		// It maybe have MAX_RX_WINDOW package receive by
1089		// receive_complete before Timer IRQ
1090/* F01_S
1091          if (st_fifo->len < (MAX_RX_WINDOW+2 )) { 
1092		  RXStart(iobase,ON);
1093	  	  SetTimer(iobase,4);
1094	  }
1095	  else	  { 
1096F01_E */
1097		EnableRX(iobase, OFF);
1098		EnRXDMA(iobase, OFF);
1099		RXStart(iobase, OFF);
1100//F01_S
1101		// Put this entry back in fifo 
1102		if (st_fifo->head > MAX_RX_WINDOW)
1103			st_fifo->head = 0;
1104		status = st_fifo->entries[st_fifo->head].status;
1105		len = st_fifo->entries[st_fifo->head].len;
1106		st_fifo->head++;
1107		st_fifo->len--;
1108
1109		skb = dev_alloc_skb(len + 1 - 4);
1110		/*
1111		 * if frame size, data ptr, or skb ptr are wrong, then get next
1112		 * entry.
1113		 */
1114		if ((skb == NULL) || (skb->data == NULL) ||
1115		    (self->rx_buff.data == NULL) || (len < 6)) {
1116			self->netdev->stats.rx_dropped++;
1117			kfree_skb(skb);
1118			return TRUE;
1119		}
1120		skb_reserve(skb, 1);
1121		skb_put(skb, len - 4);
1122
1123		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1124		pr_debug("%s(): len=%x.rx_buff=%p\n", __func__,
1125			 len - 4, self->rx_buff.data);
1126
1127		// Move to next frame 
1128		self->rx_buff.data += len;
1129		self->netdev->stats.rx_bytes += len;
1130		self->netdev->stats.rx_packets++;
1131		skb->dev = self->netdev;
1132		skb_reset_mac_header(skb);
1133		skb->protocol = htons(ETH_P_IRDA);
1134		netif_rx(skb);
1135
1136//F01_E
1137	}			//FIR
1138	return TRUE;
1139
1140}
1141
1142/*
1143 * if frame is received , but no INT ,then use this routine to upload frame.
1144 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;
	st_fifo = &self->st_fifo;

	len = GetRecvByte(iobase, self);

	pr_debug("%s(): len=%x\n", __func__, len);

	/* reject frames whose payload (len minus 4-byte FCS) is too small */
	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	skb_reserve(skb, 1);
	/* NOTE(review): "+ 1" copies one byte beyond the FCS-stripped
	 * payload, unlike the "len - 4" used elsewhere - TODO confirm
	 * whether this is intentional */
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;
	/* Move to next frame */
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	/* keep receiving while the status FIFO has room; otherwise stop RX */
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		RXStart(iobase, ON);
	} else {
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}
1190
1191/*
1192 * Implement back to back receive , use this routine to upload data.
1193 */
1194
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		/* still receiving: re-arm the timer and return,
		 * don't upload the frame yet */
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	/* Upload everything queued in the status FIFO when the link has
	 * been idle for at least one timer tick, the rx buffer is close to
	 * full, or the window is exhausted */
	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {	/* upload frame */
			/* Pop this entry off the status FIFO */
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * if frame size, data ptr, or skb ptr are wrong,
			 * then get next entry.
			 */
			if ((skb == NULL) || (skb->data == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 1);
			/* strip the 4-byte FCS */
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			pr_debug("%s(): len=%x.head=%x\n", __func__,
				 len - 4, st_fifo->head);

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}		/* while */
		self->RetryCount = 0;

		pr_debug("%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			 __func__, GetHostStatus(iobase), GetRXStatus(iobase));

		/*
		 * if frame is receive complete at this routine ,then upload
		 * frame.
		 */
		if ((GetRXStatus(iobase) & 0x10) &&
		    (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	}			/* timer detect complete */
	else
		SetTimer(iobase, 4);
	return TRUE;

}
1272
1273
1274
1275/*
1276 * Function via_ircc_interrupt (irq, dev_id)
1277 *
1278 *    An interrupt from the chip has arrived. Time to do some work
1279 *
1280 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	spin_lock(&self->lock);
	/* Host status bits: 0x40 timer, 0x20 TX, 0x10 RX event */
	iHostIntType = GetHostStatus(iobase);

	pr_debug("%s(): iHostIntType %02x:  %s %s %s  %02x\n",
		 __func__, iHostIntType,
		 (iHostIntType & 0x40) ? "Timer" : "",
		 (iHostIntType & 0x20) ? "Tx" : "",
		 (iHostIntType & 0x10) ? "Rx" : "",
		 (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	/* Timer Event */
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready held too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	/* call this to upload frame */
				RxTimerHandler(self, iobase);
			}
		}		/* RECV */
	}			/* Timer Event */
	if ((iHostIntType & 0x20) != 0) {	/* Tx Event */
		iTxIntType = GetTXStatus(iobase);

		pr_debug("%s(): iTxIntType %02x:  %s %s %s %s\n",
			 __func__, iTxIntType,
			 (iTxIntType & 0x08) ? "FIFO underr." : "",
			 (iTxIntType & 0x04) ? "EOM" : "",
			 (iTxIntType & 0x02) ? "FIFO ready" : "",
			 (iTxIntType & 0x01) ? "Early EOM" : "");

		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	/* read auto-clears */
			/* frame done: reap it, then switch back to RX if
			 * there is nothing else queued for transmit */
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		/* EOP */
	}			/* Tx Event */
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	/* Rx Event */
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		pr_debug("%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
			 __func__, iRxIntType,
			 (iRxIntType & 0x80) ? "PHY err."	: "",
			 (iRxIntType & 0x40) ? "CRC err"	: "",
			 (iRxIntType & 0x20) ? "FIFO overr."	: "",
			 (iRxIntType & 0x10) ? "EOF"		: "",
			 (iRxIntType & 0x08) ? "RxData"		: "",
			 (iRxIntType & 0x02) ? "RxMaxLen"	: "",
			 (iRxIntType & 0x01) ? "SIR bad"	: "");
		if (!iRxIntType)
			pr_debug("%s(): RxIRQ =0\n", __func__);

		if (iRxIntType & 0x10) {	/* EOF: a whole frame arrived */
			if (via_ircc_dma_receive_complete(self, iobase)) {
//F01       if(!(IsFIROn(iobase)))  via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		/* No ERR */
		else {		/* ERR */
			pr_debug("%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				 __func__, iRxIntType, iHostIntType,
				 RxCurCount(iobase, self), self->RxLastCount);

			if (iRxIntType & 0x20) {	/* FIFO OverRun ERR */
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	/* PHY, CRC ERR */

				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			/* restart reception after any error handling */
			via_ircc_dma_receive(self);
		}		/* ERR */

	}			/* Rx Event */
	spin_unlock(&self->lock);
	/* IRQ_HANDLED iff any status bit was set */
	return IRQ_RETVAL(iHostIntType);
}
1384
/* Full hardware reset: put the chip back into a clean 9600 baud SIR state,
 * then restore the previously configured speed.  Mirrors via_hw_init(). */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* IR off while programming */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);	/* IR back on */

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}
1415
1416/*
1417 * Function via_ircc_is_receiving (self)
1418 *
1419 *    Return TRUE is we are currently receiving a frame
1420 *
1421 */
1422static int via_ircc_is_receiving(struct via_ircc_cb *self)
1423{
1424	int status = FALSE;
1425	int iobase;
1426
1427	IRDA_ASSERT(self != NULL, return FALSE;);
1428
1429	iobase = self->io.fir_base;
1430	if (CkRxRecv(iobase, self))
1431		status = TRUE;
1432
1433	pr_debug("%s(): status=%x....\n", __func__, status);
1434
1435	return status;
1436}
1437
1438
1439/*
1440 * Function via_ircc_net_open (dev)
1441 *
1442 *    Start the device
1443 *
1444 */
static int via_ircc_net_open(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;
	char hwname[32];

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	dev->stats.rx_packets = 0;
	IRDA_ASSERT(self != NULL, return 0;);
	iobase = self->io.fir_base;
	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
		net_warn_ratelimited("%s, unable to allocate irq=%d\n",
				     driver_name, self->io.irq);
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ, and clean up on 
	 * failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		net_warn_ratelimited("%s, unable to allocate dma=%d\n",
				     driver_name, self->io.dma);
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}
	/* the RX channel (dma2) may be distinct from the TX one */
	if (self->io.dma2 != self->io.dma) {
		if (request_dma(self->io.dma2, dev->name)) {
			net_warn_ratelimited("%s, unable to allocate dma2=%d\n",
					     driver_name, self->io.dma2);
			free_irq(self->io.irq, dev);
			free_dma(self->io.dma);
			return -EAGAIN;
		}
	}


	/* turn on interrupts */
	EnAllInt(iobase, ON);
	EnInternalLoop(iobase, OFF);
	EnExternalLoop(iobase, OFF);

	/* arm reception */
	via_ircc_dma_receive(self);

	/* Ready to play! */
	netif_start_queue(dev);

	/* 
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly 
	 */
	sprintf(hwname, "VIA @ 0x%x", iobase);
	self->irlap = irlap_open(dev, &self->qos, hwname);

	self->RxLastCount = 0;

	return 0;
}
1504
1505/*
1506 * Function via_ircc_net_close (dev)
1507 *
1508 *    Stop the device
1509 *
1510 */
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	/* stop DMA before dropping the IRQ and the channels */
	iobase = self->io.fir_base;
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	DisableDmaChannel(self->io.dma);

	/* Disable interrupts */
	EnAllInt(iobase, OFF);
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	/* dma2 was only requested separately when distinct from dma */
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
1540
1541/*
1542 * Function via_ircc_net_ioctl (dev, rq, cmd)
1543 *
1544 *    Process IOCTL commands for this device
1545 *
1546 */
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct via_ircc_cb *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return -1;);
	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
		 cmd);
	/* Disable interrupts & save flags */
	spin_lock_irqsave(&self->lock, flags);
	switch (cmd) {
	case SIOCSBANDWIDTH:	/* Set bandwidth */
		/* privileged: reprograms the hardware speed */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		via_ircc_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY:	/* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING:	/* Check if we are receiving right now */
		irq->ifr_receiving = via_ircc_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
      out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}
1587
/* Module metadata and entry points.  via_ircc_init/via_ircc_cleanup are
 * presumably defined earlier in this file (outside this excerpt). */
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);