   1 /*
   2  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
   3  * All rights reserved.  www.lanmedia.com
   4  * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
   5  *
   6  * This code is written by:
   7  * Andrew Stanley-Jones (asj@cban.com)
   8  * Rob Braun (bbraun@vix.com),
   9  * Michael Graff (explorer@vix.com) and
  10  * Matt Thomas (matt@3am-software.com).
  11  *
  12  * With Help By:
  13  * David Boggs
  14  * Ron Crane
  15  * Alan Cox
  16  *
  17  * This software may be used and distributed according to the terms
  18  * of the GNU General Public License version 2, incorporated herein by reference.
  19  *
  20  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
  21  *
  22  * To control link specific options lmcctl is required.
  23  * It can be obtained from ftp.lanmedia.com.
  24  *
  25  * Linux driver notes:
  26  * Linux uses the device struct lmc_private to pass private information
  27  * around.
  28  *
   29  * The initialization portion of this driver consists of the lmc_reset()
   30  * and lmc_dec_reset() functions, as well as the LED controls and the
   31  * lmc_initcsrs() function.
  32  *
  33  * The watchdog function runs every second and checks to see if
  34  * we still have link, and that the timing source is what we expected
  35  * it to be.  If link is lost, the interface is marked down, and
  36  * we no longer can transmit.
  37  *
  38  */
  39
  40#include <linux/kernel.h>
  41#include <linux/module.h>
  42#include <linux/string.h>
  43#include <linux/timer.h>
  44#include <linux/ptrace.h>
  45#include <linux/errno.h>
  46#include <linux/ioport.h>
  47#include <linux/slab.h>
  48#include <linux/interrupt.h>
  49#include <linux/pci.h>
  50#include <linux/delay.h>
  51#include <linux/hdlc.h>
  52#include <linux/init.h>
  53#include <linux/in.h>
  54#include <linux/if_arp.h>
  55#include <linux/netdevice.h>
  56#include <linux/etherdevice.h>
  57#include <linux/skbuff.h>
  58#include <linux/inet.h>
  59#include <linux/bitops.h>
  60#include <asm/processor.h>             /* Processor type for cache alignment. */
  61#include <asm/io.h>
  62#include <asm/dma.h>
  63#include <asm/uaccess.h>
  64//#include <asm/spinlock.h>
  65
  66#define DRIVER_MAJOR_VERSION     1
  67#define DRIVER_MINOR_VERSION    34
  68#define DRIVER_SUB_VERSION       0
  69
  70#define DRIVER_VERSION  ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
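/* For example, driver version 1.34 packs to (1 << 8) + 34 == 0x0122.  The
 * sub-version is not part of this value; it is only reported separately
 * through LMCIOCGETXINFO below.
 */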
  71
  72#include "lmc.h"
  73#include "lmc_var.h"
  74#include "lmc_ioctl.h"
  75#include "lmc_debug.h"
  76#include "lmc_proto.h"
  77
  78static int LMC_PKT_BUF_SZ = 1542;
  79
  80static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
  81	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
  82	  PCI_VENDOR_ID_LMC, PCI_ANY_ID },
  83	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
  84	  PCI_ANY_ID, PCI_VENDOR_ID_LMC },
  85	{ 0 }
  86};
  87
  88MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
  89MODULE_LICENSE("GPL v2");
  90
  91
  92static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
  93					struct net_device *dev);
  94static int lmc_rx (struct net_device *dev);
  95static int lmc_open(struct net_device *dev);
  96static int lmc_close(struct net_device *dev);
  97static struct net_device_stats *lmc_get_stats(struct net_device *dev);
  98static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
  99static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
 100static void lmc_softreset(lmc_softc_t * const);
 101static void lmc_running_reset(struct net_device *dev);
 102static int lmc_ifdown(struct net_device * const);
 103static void lmc_watchdog(unsigned long data);
 104static void lmc_reset(lmc_softc_t * const sc);
 105static void lmc_dec_reset(lmc_softc_t * const sc);
 106static void lmc_driver_timeout(struct net_device *dev);
 107
 108/*
  109 * Linux reserves 16 device-specific IOCTLs.  We call them
 110 * LMCIOC* to control various bits of our world.
 111 */
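/*
 * Rough userspace sketch (not part of the driver): lmcctl-style tools pass
 * an lmc_ctl_t through ifr_data of an ifreq.  The ioctl numbers and struct
 * layout come from lmc_ioctl.h; the interface name and field printed below
 * are illustrative assumptions only.
 *
 *     struct ifreq ifr;
 *     lmc_ctl_t ctl;
 *     int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *     strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);     // assumed device name
 *     ifr.ifr_data = (char *)&ctl;
 *     if (ioctl(s, LMCIOCGINFO, &ifr) == 0)         // read current state
 *         printf("crc length setting: %d\n", ctl.crc_length);
 */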
 112int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 113{
 114    lmc_softc_t *sc = dev_to_sc(dev);
 115    lmc_ctl_t ctl;
 116    int ret = -EOPNOTSUPP;
 117    u16 regVal;
 118    unsigned long flags;
 119
 120    lmc_trace(dev, "lmc_ioctl in");
 121
 122    /*
  123     * Most of these operations touch shared driver state, so take
  124     * sc->lmc_lock (disabling interrupts) around the hardware access.
 125     */
 126
 127    switch (cmd) {
 128        /*
 129         * Return current driver state.  Since we keep this up
  130          * to date internally, just copy it out to the user.
 131         */
 132    case LMCIOCGINFO: /*fold01*/
 133	if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
 134		ret = -EFAULT;
 135	else
 136		ret = 0;
 137        break;
 138
 139    case LMCIOCSINFO: /*fold01*/
 140        if (!capable(CAP_NET_ADMIN)) {
 141            ret = -EPERM;
 142            break;
 143        }
 144
 145        if(dev->flags & IFF_UP){
 146            ret = -EBUSY;
 147            break;
 148        }
 149
 150	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
 151		ret = -EFAULT;
 152		break;
 153	}
 154
 155	spin_lock_irqsave(&sc->lmc_lock, flags);
 156        sc->lmc_media->set_status (sc, &ctl);
 157
 158        if(ctl.crc_length != sc->ictl.crc_length) {
 159            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
 160	    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
 161		sc->TxDescriptControlInit |=  LMC_TDES_ADD_CRC_DISABLE;
 162	    else
 163		sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
 164        }
 165	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 166
 167        ret = 0;
 168        break;
 169
 170    case LMCIOCIFTYPE: /*fold01*/
 171        {
 172	    u16 old_type = sc->if_type;
 173	    u16	new_type;
 174
 175	    if (!capable(CAP_NET_ADMIN)) {
 176		ret = -EPERM;
 177		break;
 178	    }
 179
 180	    if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
 181		ret = -EFAULT;
 182		break;
 183	    }
 184
 185            
 186	    if (new_type == old_type)
 187	    {
 188		ret = 0 ;
 189		break;				/* no change */
 190            }
 191            
 192	    spin_lock_irqsave(&sc->lmc_lock, flags);
 193            lmc_proto_close(sc);
 194
 195            sc->if_type = new_type;
 196            lmc_proto_attach(sc);
 197	    ret = lmc_proto_open(sc);
 198	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 199	    break;
 200	}
 201
 202    case LMCIOCGETXINFO: /*fold01*/
 203	spin_lock_irqsave(&sc->lmc_lock, flags);
 204        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
 205
 206        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
 207        sc->lmc_xinfo.PciSlotNumber = 0;
 208        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
 209        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
 210        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
 211        sc->lmc_xinfo.XilinxRevisionNumber =
 212            lmc_mii_readreg (sc, 0, 3) & 0xf;
 213        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
 214        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
 215        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
 216	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 217
 218        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
 219
 220        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
 221			 sizeof(struct lmc_xinfo)))
 222		ret = -EFAULT;
 223	else
 224		ret = 0;
 225
 226        break;
 227
 228    case LMCIOCGETLMCSTATS:
 229	    spin_lock_irqsave(&sc->lmc_lock, flags);
 230	    if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
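		    /* The T1 framer's counters are read indirectly through the
		     * MII interface: write the framer register address to MII
		     * register 17, then read the byte back from MII register 18
		     * (LSB and MSB separately), as done below.
		     */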
 231		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
 232		    sc->extra_stats.framingBitErrorCount +=
 233			    lmc_mii_readreg(sc, 0, 18) & 0xff;
 234		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
 235		    sc->extra_stats.framingBitErrorCount +=
 236			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
 237		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
 238		    sc->extra_stats.lineCodeViolationCount +=
 239			    lmc_mii_readreg(sc, 0, 18) & 0xff;
 240		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
 241		    sc->extra_stats.lineCodeViolationCount +=
 242			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
 243		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
 244		    regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
 245
 246		    sc->extra_stats.lossOfFrameCount +=
 247			    (regVal & T1FRAMER_LOF_MASK) >> 4;
 248		    sc->extra_stats.changeOfFrameAlignmentCount +=
 249			    (regVal & T1FRAMER_COFA_MASK) >> 2;
 250		    sc->extra_stats.severelyErroredFrameCount +=
 251			    regVal & T1FRAMER_SEF_MASK;
 252	    }
 253	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 254	    if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
 255			     sizeof(sc->lmc_device->stats)) ||
 256		copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
 257			     &sc->extra_stats, sizeof(sc->extra_stats)))
 258		    ret = -EFAULT;
 259	    else
 260		    ret = 0;
 261	    break;
 262
 263    case LMCIOCCLEARLMCSTATS:
 264	    if (!capable(CAP_NET_ADMIN)) {
 265		    ret = -EPERM;
 266		    break;
 267	    }
 268
 269	    spin_lock_irqsave(&sc->lmc_lock, flags);
 270	    memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
 271	    memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
 272	    sc->extra_stats.check = STATCHECK;
 273	    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 274		    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 275	    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 276	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 277	    ret = 0;
 278	    break;
 279
 280    case LMCIOCSETCIRCUIT: /*fold01*/
 281        if (!capable(CAP_NET_ADMIN)){
 282            ret = -EPERM;
 283            break;
 284        }
 285
 286        if(dev->flags & IFF_UP){
 287            ret = -EBUSY;
 288            break;
 289        }
 290
 291	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
 292		ret = -EFAULT;
 293		break;
 294	}
 295	spin_lock_irqsave(&sc->lmc_lock, flags);
 296        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
 297        sc->ictl.circuit_type = ctl.circuit_type;
 298	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 299        ret = 0;
 300
 301        break;
 302
 303    case LMCIOCRESET: /*fold01*/
 304        if (!capable(CAP_NET_ADMIN)){
 305            ret = -EPERM;
 306            break;
 307        }
 308
 309	spin_lock_irqsave(&sc->lmc_lock, flags);
 310        /* Reset driver and bring back to current state */
 311        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
 312        lmc_running_reset (dev);
 313        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
 314
 315        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
 316	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 317
 318        ret = 0;
 319        break;
 320
 321#ifdef DEBUG
 322    case LMCIOCDUMPEVENTLOG:
 323	if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
 324		ret = -EFAULT;
 325		break;
 326	}
 327	if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
 328			 sizeof(lmcEventLogBuf)))
 329		ret = -EFAULT;
 330	else
 331		ret = 0;
 332
 333        break;
  334#endif /* DEBUG */
 335    case LMCIOCT1CONTROL: /*fold01*/
 336        if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
 337            ret = -EOPNOTSUPP;
 338            break;
 339        }
 340        break;
 341    case LMCIOCXILINX: /*fold01*/
 342        {
 343            struct lmc_xilinx_control xc; /*fold02*/
 344
 345            if (!capable(CAP_NET_ADMIN)){
 346                ret = -EPERM;
 347                break;
 348            }
 349
 350            /*
  351              * Stop the transmitter while we restart the hardware
 352             */
 353            netif_stop_queue(dev);
 354
 355	    if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
 356		ret = -EFAULT;
 357		break;
 358	    }
 359            switch(xc.command){
 360            case lmc_xilinx_reset: /*fold02*/
 361                {
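                    /* Sequence: tri-state all GPIO pins, drive the Xilinx RESET
                     * pin low long enough to force reconfiguration, release it,
                     * then blink the DS3 LEDs as a visible sign that the reset
                     * path ran.
                     */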
 362                    u16 mii;
 363		    spin_lock_irqsave(&sc->lmc_lock, flags);
 364                    mii = lmc_mii_readreg (sc, 0, 16);
 365
 366                    /*
 367                     * Make all of them 0 and make input
 368                     */
 369                    lmc_gpio_mkinput(sc, 0xff);
 370
 371                    /*
 372                     * make the reset output
 373                     */
 374                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
 375
 376                    /*
 377                     * RESET low to force configuration.  This also forces
 378                     * the transmitter clock to be internal, but we expect to reset
 379                     * that later anyway.
 380                     */
 381
 382                    sc->lmc_gpio &= ~LMC_GEP_RESET;
 383                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 384
 385
 386                    /*
 387                     * hold for more than 10 microseconds
 388                     */
 389                    udelay(50);
 390
 391                    sc->lmc_gpio |= LMC_GEP_RESET;
 392                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 393
 394
 395                    /*
 396                     * stop driving Xilinx-related signals
 397                     */
 398                    lmc_gpio_mkinput(sc, 0xff);
 399
  400                    /* Reset the framer hardware */
 401                    sc->lmc_media->set_link_status (sc, 1);
 402                    sc->lmc_media->set_status (sc, NULL);
 403//                    lmc_softreset(sc);
 404
 405                    {
 406                        int i;
 407                        for(i = 0; i < 5; i++){
 408                            lmc_led_on(sc, LMC_DS3_LED0);
 409                            mdelay(100);
 410                            lmc_led_off(sc, LMC_DS3_LED0);
 411                            lmc_led_on(sc, LMC_DS3_LED1);
 412                            mdelay(100);
 413                            lmc_led_off(sc, LMC_DS3_LED1);
 414                            lmc_led_on(sc, LMC_DS3_LED3);
 415                            mdelay(100);
 416                            lmc_led_off(sc, LMC_DS3_LED3);
 417                            lmc_led_on(sc, LMC_DS3_LED2);
 418                            mdelay(100);
 419                            lmc_led_off(sc, LMC_DS3_LED2);
 420                        }
 421                    }
 422		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 423                    
 424                    
 425
 426                    ret = 0x0;
 427
 428                }
 429
 430                break;
 431            case lmc_xilinx_load_prom: /*fold02*/
 432                {
 433                    u16 mii;
 434                    int timeout = 500000;
 435		    spin_lock_irqsave(&sc->lmc_lock, flags);
 436                    mii = lmc_mii_readreg (sc, 0, 16);
 437
 438                    /*
 439                     * Make all of them 0 and make input
 440                     */
 441                    lmc_gpio_mkinput(sc, 0xff);
 442
 443                    /*
 444                     * make the reset output
 445                     */
 446                    lmc_gpio_mkoutput(sc,  LMC_GEP_DP | LMC_GEP_RESET);
 447
 448                    /*
 449                     * RESET low to force configuration.  This also forces
 450                     * the transmitter clock to be internal, but we expect to reset
 451                     * that later anyway.
 452                     */
 453
 454                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
 455                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 456
 457
 458                    /*
 459                     * hold for more than 10 microseconds
 460                     */
 461                    udelay(50);
 462
 463                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
 464                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 465
 466                    /*
 467                     * busy wait for the chip to reset
 468                     */
 469                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
 470                           (timeout-- > 0))
 471                        cpu_relax();
 472
 473
 474                    /*
 475                     * stop driving Xilinx-related signals
 476                     */
 477                    lmc_gpio_mkinput(sc, 0xff);
 478		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 479
 480                    ret = 0x0;
 481                    
 482
 483                    break;
 484
 485                }
 486
 487            case lmc_xilinx_load: /*fold02*/
 488                {
 489                    char *data;
 490                    int pos;
 491                    int timeout = 500000;
 492
 493                    if (!xc.data) {
 494                            ret = -EINVAL;
 495                            break;
 496                    }
 497
 498                    data = kmalloc(xc.len, GFP_KERNEL);
 499                    if (!data) {
 500                            ret = -ENOMEM;
 501                            break;
 502                    }
 503                    
 504                    if(copy_from_user(data, xc.data, xc.len))
 505                    {
 506                    	kfree(data);
  507                    	ret = -EFAULT;
 508                    	break;
 509                    }
 510
 511                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
 512
 513		    spin_lock_irqsave(&sc->lmc_lock, flags);
 514                    lmc_gpio_mkinput(sc, 0xff);
 515
 516                    /*
  517                     * Clear the Xilinx and start programming from the DEC
 518                     */
 519
 520                    /*
  521                     * Set output as:
 522                     * Reset: 0 (active)
 523                     * DP:    0 (active)
 524                     * Mode:  1
 525                     *
 526                     */
 527                    sc->lmc_gpio = 0x00;
 528                    sc->lmc_gpio &= ~LMC_GEP_DP;
 529                    sc->lmc_gpio &= ~LMC_GEP_RESET;
 530                    sc->lmc_gpio |=  LMC_GEP_MODE;
 531                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 532
 533                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
 534
 535                    /*
  536                     * Wait at least 10 us (20 to be safe)
 537                     */
 538                    udelay(50);
 539
 540                    /*
 541                     * Clear reset and activate programming lines
 542                     * Reset: Input
 543                     * DP:    Input
 544                     * Clock: Output
 545                     * Data:  Output
 546                     * Mode:  Output
 547                     */
 548                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
 549
 550                    /*
  551                     * Set MODE (load), DATA and CLK to 1
 552                     */
 553                    sc->lmc_gpio = 0x00;
 554                    sc->lmc_gpio |= LMC_GEP_MODE;
 555                    sc->lmc_gpio |= LMC_GEP_DATA;
 556                    sc->lmc_gpio |= LMC_GEP_CLK;
 557                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 558                    
 559                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
 560
 561                    /*
 562                     * busy wait for the chip to reset
 563                     */
 564                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
 565                           (timeout-- > 0))
 566                        cpu_relax();
 567
  568                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
 569
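                    /* Bit-bang the bitstream: for each bit, set or clear DATA,
                     * drive CLK low and then high again (MODE stays asserted),
                     * handing the Xilinx one configuration bit per clock cycle.
                     */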
 570                    for(pos = 0; pos < xc.len; pos++){
 571                        switch(data[pos]){
 572                        case 0:
 573                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
 574                            break;
 575                        case 1:
 576                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
 577                            break;
 578                        default:
 579                            printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
 580                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
 581                        }
 582                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
 583                        sc->lmc_gpio |= LMC_GEP_MODE;
 584                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 585                        udelay(1);
 586                        
  587                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
 588                        sc->lmc_gpio |= LMC_GEP_MODE;
 589                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 590                        udelay(1);
 591                    }
 592                    if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
 593                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
 594                    }
 595                    else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
 596                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
 597                    }
 598                    else {
 599                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
 600                    }
 601
 602                    lmc_gpio_mkinput(sc, 0xff);
 603                    
 604                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
 605                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
 606
 607                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
 608                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
 609		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 610
 611                    kfree(data);
 612                    
 613                    ret = 0;
 614                    
 615                    break;
 616                }
 617            default: /*fold02*/
 618                ret = -EBADE;
 619                break;
 620            }
 621
 622            netif_wake_queue(dev);
 623            sc->lmc_txfull = 0;
 624
 625        }
 626        break;
 627    default: /*fold01*/
 628        /* If we don't know what to do, give the protocol a shot. */
 629        ret = lmc_proto_ioctl (sc, ifr, cmd);
 630        break;
 631    }
 632
 633    lmc_trace(dev, "lmc_ioctl out");
 634
 635    return ret;
 636}
 637
 638
 639/* the watchdog process that cruises around */
 640static void lmc_watchdog (unsigned long data) /*fold00*/
 641{
 642    struct net_device *dev = (struct net_device *)data;
 643    lmc_softc_t *sc = dev_to_sc(dev);
 644    int link_status;
 645    u32 ticks;
 646    unsigned long flags;
 647
 648    lmc_trace(dev, "lmc_watchdog in");
 649
 650    spin_lock_irqsave(&sc->lmc_lock, flags);
 651
 652    if(sc->check != 0xBEAFCAFE){
 653        printk("LMC: Corrupt net_device struct, breaking out\n");
 654	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 655        return;
 656    }
 657
 658
 659    /* Make sure the tx jabber and rx watchdog are off,
 660     * and the transmit and receive processes are running.
 661     */
 662
 663    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
 664    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
 665    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
 666
 667    if (sc->lmc_ok == 0)
 668        goto kick_timer;
 669
 670    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
 671
 672    /* --- begin time out check -----------------------------------
  673     * Check for a transmit interrupt timeout: has the "packets transmitted
  674     * vs. transmit interrupts serviced" threshold been exceeded? */
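    /* Two-pass scheme: the first watchdog tick that sees transmitted packets
     * with no serviced transmit interrupts only sets tx_TimeoutInd; if the
     * same condition still holds on the next tick, the DEC chip is assumed
     * stuck and a running reset is issued.
     */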
 675    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
 676	sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
 677	sc->tx_TimeoutInd == 0)
 678    {
 679
 680        /* wait for the watchdog to come around again */
 681        sc->tx_TimeoutInd = 1;
 682    }
 683    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
 684	     sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
 685	     sc->tx_TimeoutInd)
 686    {
 687
 688        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
 689
 690        sc->tx_TimeoutDisplay = 1;
 691	sc->extra_stats.tx_TimeoutCnt++;
 692
 693        /* DEC chip is stuck, hit it with a RESET!!!! */
 694        lmc_running_reset (dev);
 695
 696
 697        /* look at receive & transmit process state to make sure they are running */
 698        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
 699
 700        /* look at: DSR - 02  for Reg 16
 701         *                  CTS - 08
 702         *                  DCD - 10
 703         *                  RI  - 20
 704         * for Reg 17
 705         */
 706        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
 707
 708        /* reset the transmit timeout detection flag */
 709        sc->tx_TimeoutInd = 0;
 710        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
 711	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
 712    } else {
 713        sc->tx_TimeoutInd = 0;
 714        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
 715	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
 716    }
 717
 718    /* --- end time out check ----------------------------------- */
 719
 720
 721    link_status = sc->lmc_media->get_link_status (sc);
 722
 723    /*
 724     * hardware level link lost, but the interface is marked as up.
 725     * Mark it as down.
 726     */
 727    if ((link_status == 0) && (sc->last_link_status != 0)) {
 728        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
 729        sc->last_link_status = 0;
 730        /* lmc_reset (sc); Why reset??? The link can go down ok */
 731
 732        /* Inform the world that link has been lost */
 733	netif_carrier_off(dev);
 734    }
 735
 736    /*
 737     * hardware link is up, but the interface is marked as down.
 738     * Bring it back up again.
 739     */
 740     if (link_status != 0 && sc->last_link_status == 0) {
 741         printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
 742         sc->last_link_status = 1;
 743         /* lmc_reset (sc); Again why reset??? */
 744
 745	 netif_carrier_on(dev);
 746     }
 747
 748    /* Call media specific watchdog functions */
 749    sc->lmc_media->watchdog(sc);
 750
 751    /*
  752     * Poke the receive poll demand register so reception never
  753     * stalls, even if we ran out of memory for receive buffers
 754     */
 755    LMC_CSR_WRITE(sc, csr_rxpoll, 0);
 756
 757    /*
 758     * Check for code that failed
 759     * and try and fix it as appropriate
 760     */
 761    if(sc->failed_ring == 1){
 762        /*
  763         * Failed to set up the recv/xmit ring.
 764         * Try again
 765         */
 766        sc->failed_ring = 0;
 767        lmc_softreset(sc);
 768    }
 769    if(sc->failed_recv_alloc == 1){
 770        /*
 771         * We failed to alloc mem in the
 772         * interrupt handler, go through the rings
 773         * and rebuild them
 774         */
 775        sc->failed_recv_alloc = 0;
 776        lmc_softreset(sc);
 777    }
 778
 779
 780    /*
 781     * remember the timer value
 782     */
 783kick_timer:
 784
 785    ticks = LMC_CSR_READ (sc, csr_gp_timer);
 786    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
 787    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
 788
 789    /*
 790     * restart this timer.
 791     */
 792    sc->timer.expires = jiffies + (HZ);
 793    add_timer (&sc->timer);
 794
 795    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 796
 797    lmc_trace(dev, "lmc_watchdog out");
 798
 799}
 800
 801static int lmc_attach(struct net_device *dev, unsigned short encoding,
 802		      unsigned short parity)
 803{
 804	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
 805		return 0;
 806	return -EINVAL;
 807}
 808
 809static const struct net_device_ops lmc_ops = {
 810	.ndo_open       = lmc_open,
 811	.ndo_stop       = lmc_close,
 812	.ndo_change_mtu = hdlc_change_mtu,
 813	.ndo_start_xmit = hdlc_start_xmit,
 814	.ndo_do_ioctl   = lmc_ioctl,
 815	.ndo_tx_timeout = lmc_driver_timeout,
 816	.ndo_get_stats  = lmc_get_stats,
 817};
 818
 819static int __devinit lmc_init_one(struct pci_dev *pdev,
 820				  const struct pci_device_id *ent)
 821{
 822	lmc_softc_t *sc;
 823	struct net_device *dev;
 824	u16 subdevice;
 825	u16 AdapModelNum;
 826	int err;
 827	static int cards_found;
 828
 829	/* lmc_trace(dev, "lmc_init_one in"); */
 830
 831	err = pci_enable_device(pdev);
 832	if (err) {
 833		printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
 834		return err;
 835	}
 836
 837	err = pci_request_regions(pdev, "lmc");
 838	if (err) {
 839		printk(KERN_ERR "lmc: pci_request_region failed\n");
 840		goto err_req_io;
 841	}
 842
 843	/*
 844	 * Allocate our own device structure
 845	 */
 846	sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
 847	if (!sc) {
 848		err = -ENOMEM;
 849		goto err_kzalloc;
 850	}
 851
 852	dev = alloc_hdlcdev(sc);
 853	if (!dev) {
 854		printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
  855		err = -ENOMEM;
		goto err_hdlcdev;
 856	}
 857
 858
 859	dev->type = ARPHRD_HDLC;
 860	dev_to_hdlc(dev)->xmit = lmc_start_xmit;
 861	dev_to_hdlc(dev)->attach = lmc_attach;
 862	dev->netdev_ops = &lmc_ops;
 863	dev->watchdog_timeo = HZ; /* 1 second */
 864	dev->tx_queue_len = 100;
 865	sc->lmc_device = dev;
 866	sc->name = dev->name;
 867	sc->if_type = LMC_PPP;
 868	sc->check = 0xBEAFCAFE;
 869	dev->base_addr = pci_resource_start(pdev, 0);
 870	dev->irq = pdev->irq;
 871	pci_set_drvdata(pdev, dev);
 872	SET_NETDEV_DEV(dev, &pdev->dev);
 873
 874	/*
 875	 * This will get the protocol layer ready and do any 1 time init's
 876	 * Must have a valid sc and dev structure
 877	 */
 878	lmc_proto_attach(sc);
 879
  880	/* Init the spin lock so we can take it later */
 881
 882	spin_lock_init(&sc->lmc_lock);
 883	pci_set_master(pdev);
 884
 885	printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
 886	       dev->base_addr, dev->irq);
 887
 888	err = register_hdlc_device(dev);
 889	if (err) {
 890		printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
 891		free_netdev(dev);
 892		goto err_hdlcdev;
 893	}
 894
 895    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
 896    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
 897
  898    /*
  899     * Check either the subvendor or the subdevice: some systems reverse
  900     * the two in the BIOS (seems to be version and arch dependent), so
  901     * if the subdevice field holds the LMC vendor ID, use the subvendor
  902     * value instead.
  903     */
 904    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
 905	    subdevice = pdev->subsystem_vendor;
 906
 907    switch (subdevice) {
 908    case PCI_DEVICE_ID_LMC_HSSI:
 909	printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
 910        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
 911        sc->lmc_media = &lmc_hssi_media;
 912        break;
 913    case PCI_DEVICE_ID_LMC_DS3:
 914	printk(KERN_INFO "%s: LMC DS3\n", dev->name);
 915        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
 916        sc->lmc_media = &lmc_ds3_media;
 917        break;
 918    case PCI_DEVICE_ID_LMC_SSI:
 919	printk(KERN_INFO "%s: LMC SSI\n", dev->name);
 920        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
 921        sc->lmc_media = &lmc_ssi_media;
 922        break;
 923    case PCI_DEVICE_ID_LMC_T1:
 924	printk(KERN_INFO "%s: LMC T1\n", dev->name);
 925        sc->lmc_cardtype = LMC_CARDTYPE_T1;
 926        sc->lmc_media = &lmc_t1_media;
 927        break;
 928    default:
 929	printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
 930        break;
 931    }
 932
 933    lmc_initcsrs (sc, dev->base_addr, 8);
 934
 935    lmc_gpio_mkinput (sc, 0xff);
 936    sc->lmc_gpio = 0;		/* drive no signals yet */
 937
 938    sc->lmc_media->defaults (sc);
 939
 940    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
 941
 942    /* verify that the PCI Sub System ID matches the Adapter Model number
 943     * from the MII register
 944     */
 945    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
 946
 947    if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
 948	 subdevice != PCI_DEVICE_ID_LMC_T1) &&
 949	(AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
 950	 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
 951	(AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
 952	 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
 953	(AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
 954	 subdevice != PCI_DEVICE_ID_LMC_HSSI))
 955	    printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
 956		   " Subsystem ID = 0x%04x\n",
 957		   dev->name, AdapModelNum, subdevice);
 958
 959    /*
 960     * reset clock
 961     */
 962    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
 963
 964    sc->board_idx = cards_found++;
 965    sc->extra_stats.check = STATCHECK;
 966    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 967	    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 968    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 969
 970    sc->lmc_ok = 0;
 971    sc->last_link_status = 0;
 972
 973    lmc_trace(dev, "lmc_init_one out");
 974    return 0;
 975
 976err_hdlcdev:
 977	pci_set_drvdata(pdev, NULL);
 978	kfree(sc);
 979err_kzalloc:
 980	pci_release_regions(pdev);
 981err_req_io:
 982	pci_disable_device(pdev);
 983	return err;
 984}
 985
 986/*
 987 * Called from pci when removing module.
 988 */
 989static void __devexit lmc_remove_one(struct pci_dev *pdev)
 990{
 991	struct net_device *dev = pci_get_drvdata(pdev);
 992
 993	if (dev) {
 994		printk(KERN_DEBUG "%s: removing...\n", dev->name);
 995		unregister_hdlc_device(dev);
 996		free_netdev(dev);
 997		pci_release_regions(pdev);
 998		pci_disable_device(pdev);
 999		pci_set_drvdata(pdev, NULL);
1000	}
1001}
1002
1003/* After this is called, packets can be sent.
1004 * Does not initialize the addresses
1005 */
1006static int lmc_open(struct net_device *dev)
1007{
1008    lmc_softc_t *sc = dev_to_sc(dev);
1009    int err;
1010
1011    lmc_trace(dev, "lmc_open in");
1012
1013    lmc_led_on(sc, LMC_DS3_LED0);
1014
1015    lmc_dec_reset(sc);
1016    lmc_reset(sc);
1017
1018    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
1019    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
1020		  lmc_mii_readreg(sc, 0, 17));
1021
1022    if (sc->lmc_ok){
1023        lmc_trace(dev, "lmc_open lmc_ok out");
1024        return 0;
1025    }
1026
1027    lmc_softreset (sc);
1028
 1029    /* Since we have to use the PCI bus, this should work on x86, alpha, ppc */
1030    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
1031        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
1032        lmc_trace(dev, "lmc_open irq failed out");
1033        return -EAGAIN;
1034    }
1035    sc->got_irq = 1;
1036
1037    /* Assert Terminal Active */
1038    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
1039    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
1040
1041    /*
1042     * reset to last state.
1043     */
1044    sc->lmc_media->set_status (sc, NULL);
1045
1046    /* setup default bits to be used in tulip_desc_t transmit descriptor
1047     * -baz */
1048    sc->TxDescriptControlInit = (
1049                                 LMC_TDES_INTERRUPT_ON_COMPLETION
1050                                 | LMC_TDES_FIRST_SEGMENT
1051                                 | LMC_TDES_LAST_SEGMENT
1052                                 | LMC_TDES_SECOND_ADDR_CHAINED
1053                                 | LMC_TDES_DISABLE_PADDING
1054                                );
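    /* These bits are OR'd into every transmit descriptor in lmc_start_xmit(),
     * so each frame goes out as a single, chained, unpadded segment.
     */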
1055
1056    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
1057        /* disable 32 bit CRC generated by ASIC */
1058        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
1059    }
1060    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
 1061    /* Acknowledge the Terminal Active and light LEDs */
1062
1063    /* dev->flags |= IFF_UP; */
1064
1065    if ((err = lmc_proto_open(sc)) != 0)
1066	    return err;
1067
1068    netif_start_queue(dev);
1069    sc->extra_stats.tx_tbusy0++;
1070
1071    /*
1072     * select what interrupts we want to get
1073     */
1074    sc->lmc_intrmask = 0;
1075    /* Should be using the default interrupt mask defined in the .h file. */
1076    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
1077                         | TULIP_STS_RXINTR
1078                         | TULIP_STS_TXINTR
1079                         | TULIP_STS_ABNRMLINTR
1080                         | TULIP_STS_SYSERROR
1081                         | TULIP_STS_TXSTOPPED
1082                         | TULIP_STS_TXUNDERFLOW
1083                         | TULIP_STS_RXSTOPPED
1084		         | TULIP_STS_RXNOBUF
1085                        );
1086    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1087
1088    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
1089    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
1090    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1091
1092    sc->lmc_ok = 1; /* Run watchdog */
1093
1094    /*
1095     * Set the if up now - pfb
1096     */
1097
1098    sc->last_link_status = 1;
1099
1100    /*
1101     * Setup a timer for the watchdog on probe, and start it running.
1102     * Since lmc_ok == 0, it will be a NOP for now.
1103     */
1104    init_timer (&sc->timer);
1105    sc->timer.expires = jiffies + HZ;
1106    sc->timer.data = (unsigned long) dev;
1107    sc->timer.function = lmc_watchdog;
1108    add_timer (&sc->timer);
1109
1110    lmc_trace(dev, "lmc_open out");
1111
1112    return 0;
1113}
1114
1115/* Total reset to compensate for the AdTran DSU doing bad things
1116 *  under heavy load
1117 */
1118
1119static void lmc_running_reset (struct net_device *dev) /*fold00*/
1120{
1121    lmc_softc_t *sc = dev_to_sc(dev);
1122
1123    lmc_trace(dev, "lmc_running_reset in");
1124
1125    /* stop interrupts */
1126    /* Clear the interrupt mask */
1127    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1128
1129    lmc_dec_reset (sc);
1130    lmc_reset (sc);
1131    lmc_softreset (sc);
1132    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
1133    sc->lmc_media->set_link_status (sc, 1);
1134    sc->lmc_media->set_status (sc, NULL);
1135
1136    netif_wake_queue(dev);
1137
1138    sc->lmc_txfull = 0;
1139    sc->extra_stats.tx_tbusy0++;
1140
1141    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
1142    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1143
1144    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
1145    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1146
 1147    lmc_trace(dev, "lmc_running_reset out");
1148}
1149
1150
1151/* This is what is called when you ifconfig down a device.
1152 * This disables the timer for the watchdog and keepalives,
1153 * and disables the irq for dev.
1154 */
1155static int lmc_close(struct net_device *dev)
1156{
1157    /* not calling release_region() as we should */
1158    lmc_softc_t *sc = dev_to_sc(dev);
1159
1160    lmc_trace(dev, "lmc_close in");
1161
1162    sc->lmc_ok = 0;
1163    sc->lmc_media->set_link_status (sc, 0);
1164    del_timer (&sc->timer);
1165    lmc_proto_close(sc);
1166    lmc_ifdown (dev);
1167
1168    lmc_trace(dev, "lmc_close out");
1169
1170    return 0;
1171}
1172
1173/* Ends the transfer of packets */
1174/* When the interface goes down, this is called */
1175static int lmc_ifdown (struct net_device *dev) /*fold00*/
1176{
1177    lmc_softc_t *sc = dev_to_sc(dev);
1178    u32 csr6;
1179    int i;
1180
1181    lmc_trace(dev, "lmc_ifdown in");
1182
1183    /* Don't let anything else go on right now */
1184    //    dev->start = 0;
1185    netif_stop_queue(dev);
1186    sc->extra_stats.tx_tbusy1++;
1187
1188    /* stop interrupts */
1189    /* Clear the interrupt mask */
1190    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1191
1192    /* Stop Tx and Rx on the chip */
1193    csr6 = LMC_CSR_READ (sc, csr_command);
1194    csr6 &= ~LMC_DEC_ST;		/* Turn off the Transmission bit */
1195    csr6 &= ~LMC_DEC_SR;		/* Turn off the Receive bit */
1196    LMC_CSR_WRITE (sc, csr_command, csr6);
1197
1198    sc->lmc_device->stats.rx_missed_errors +=
1199	    LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1200
1201    /* release the interrupt */
1202    if(sc->got_irq == 1){
1203        free_irq (dev->irq, dev);
1204        sc->got_irq = 0;
1205    }
1206
1207    /* free skbuffs in the Rx queue */
1208    for (i = 0; i < LMC_RXDESCS; i++)
1209    {
1210        struct sk_buff *skb = sc->lmc_rxq[i];
1211        sc->lmc_rxq[i] = NULL;
1212        sc->lmc_rxring[i].status = 0;
1213        sc->lmc_rxring[i].length = 0;
1214        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
1215        if (skb != NULL)
1216            dev_kfree_skb(skb);
1217        sc->lmc_rxq[i] = NULL;
1218    }
1219
1220    for (i = 0; i < LMC_TXDESCS; i++)
1221    {
1222        if (sc->lmc_txq[i] != NULL)
1223            dev_kfree_skb(sc->lmc_txq[i]);
1224        sc->lmc_txq[i] = NULL;
1225    }
1226
1227    lmc_led_off (sc, LMC_MII16_LED_ALL);
1228
1229    netif_wake_queue(dev);
1230    sc->extra_stats.tx_tbusy0++;
1231
1232    lmc_trace(dev, "lmc_ifdown out");
1233
1234    return 0;
1235}
1236
1237/* Interrupt handling routine.  This will take an incoming packet, or clean
 1238 * up after a transmit.
1239 */
1240static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1241{
1242    struct net_device *dev = (struct net_device *) dev_instance;
1243    lmc_softc_t *sc = dev_to_sc(dev);
1244    u32 csr;
1245    int i;
1246    s32 stat;
1247    unsigned int badtx;
1248    u32 firstcsr;
1249    int max_work = LMC_RXDESCS;
1250    int handled = 0;
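    /* max_work bounds the number of passes through the status loop below so a
     * single interrupt cannot monopolize the CPU under heavy traffic.
     */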
1251
1252    lmc_trace(dev, "lmc_interrupt in");
1253
1254    spin_lock(&sc->lmc_lock);
1255
1256    /*
1257     * Read the csr to find what interrupts we have (if any)
1258     */
1259    csr = LMC_CSR_READ (sc, csr_status);
1260
1261    /*
1262     * Make sure this is our interrupt
1263     */
1264    if ( ! (csr & sc->lmc_intrmask)) {
1265        goto lmc_int_fail_out;
1266    }
1267
1268    firstcsr = csr;
1269
1270    /* always go through this loop at least once */
1271    while (csr & sc->lmc_intrmask) {
1272	handled = 1;
1273
1274        /*
 1275         * Clear interrupt bits, we handle all cases below
1276         */
1277        LMC_CSR_WRITE (sc, csr_status, csr);
1278
1279        /*
1280         * One of
1281         *  - Transmit process timed out CSR5<1>
1282         *  - Transmit jabber timeout    CSR5<3>
1283         *  - Transmit underflow         CSR5<5>
 1284         *  - Receive buffer unavailable  CSR5<7>
1285         *  - Receive process stopped    CSR5<8>
1286         *  - Receive watchdog timeout   CSR5<9>
1287         *  - Early transmit interrupt   CSR5<10>
1288         *
1289         * Is this really right? Should we do a running reset for jabber?
1290         * (being a WAN card and all)
1291         */
1292        if (csr & TULIP_STS_ABNRMLINTR){
1293            lmc_running_reset (dev);
1294            break;
1295        }
1296        
1297        if (csr & TULIP_STS_RXINTR){
1298            lmc_trace(dev, "rx interrupt");
1299            lmc_rx (dev);
1300            
1301        }
1302        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
1303
1304	    int		n_compl = 0 ;
1305            /* reset the transmit timeout detection flag -baz */
1306	    sc->extra_stats.tx_NoCompleteCnt = 0;
1307
1308            badtx = sc->lmc_taint_tx;
1309            i = badtx % LMC_TXDESCS;
1310
1311            while ((badtx < sc->lmc_next_tx)) {
1312                stat = sc->lmc_txring[i].status;
1313
1314                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
1315						 sc->lmc_txring[i].length);
1316                /*
 1317                 * If bit 31 is 1, the Tulip still owns it; break out of the loop
1318                 */
1319                if (stat & 0x80000000)
1320                    break;
1321
1322		n_compl++ ;		/* i.e., have an empty slot in ring */
1323                /*
 1324                 * If we have no skbuff or have already cleared it,
 1325                 * continue to the next buffer
1326                 */
1327                if (sc->lmc_txq[i] == NULL)
1328                    continue;
1329
1330		/*
1331		 * Check the total error summary to look for any errors
1332		 */
1333		if (stat & 0x8000) {
1334			sc->lmc_device->stats.tx_errors++;
1335			if (stat & 0x4104)
1336				sc->lmc_device->stats.tx_aborted_errors++;
1337			if (stat & 0x0C00)
1338				sc->lmc_device->stats.tx_carrier_errors++;
1339			if (stat & 0x0200)
1340				sc->lmc_device->stats.tx_window_errors++;
1341			if (stat & 0x0002)
1342				sc->lmc_device->stats.tx_fifo_errors++;
1343		} else {
1344			sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1345
1346			sc->lmc_device->stats.tx_packets++;
1347                }
1348
1349                //                dev_kfree_skb(sc->lmc_txq[i]);
1350                dev_kfree_skb_irq(sc->lmc_txq[i]);
1351                sc->lmc_txq[i] = NULL;
1352
1353                badtx++;
1354                i = badtx % LMC_TXDESCS;
1355            }
1356
1357            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
1358            {
1359                printk ("%s: out of sync pointer\n", dev->name);
1360                badtx += LMC_TXDESCS;
1361            }
1362            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1363            sc->lmc_txfull = 0;
1364            netif_wake_queue(dev);
1365	    sc->extra_stats.tx_tbusy0++;
1366
1367
1368#ifdef DEBUG
1369	    sc->extra_stats.dirtyTx = badtx;
1370	    sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1371	    sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1372#endif
1373            sc->lmc_taint_tx = badtx;
1374
1375            /*
1376             * Why was there a break here???
1377             */
1378        }			/* end handle transmit interrupt */
1379
1380        if (csr & TULIP_STS_SYSERROR) {
1381            u32 error;
1382            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
1383            error = csr>>23 & 0x7;
1384            switch(error){
1385            case 0x000:
1386                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
1387                break;
1388            case 0x001:
1389                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1390                break;
 1391            case 0x002:
1392                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1393                break;
1394            default:
1395                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
1396            }
1397            lmc_dec_reset (sc);
1398            lmc_reset (sc);
1399            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1400            LMC_EVENT_LOG(LMC_EVENT_RESET2,
1401                          lmc_mii_readreg (sc, 0, 16),
1402                          lmc_mii_readreg (sc, 0, 17));
1403
1404        }
1405
1406        
1407        if(max_work-- <= 0)
1408            break;
1409        
1410        /*
1411         * Get current csr status to make sure
1412         * we've cleared all interrupts
1413         */
1414        csr = LMC_CSR_READ (sc, csr_status);
1415    }				/* end interrupt loop */
1416    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
1417
1418lmc_int_fail_out:
1419
1420    spin_unlock(&sc->lmc_lock);
1421
1422    lmc_trace(dev, "lmc_interrupt out");
1423    return IRQ_RETVAL(handled);
1424}
1425
1426static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
1427					struct net_device *dev)
1428{
1429    lmc_softc_t *sc = dev_to_sc(dev);
1430    u32 flag;
1431    int entry;
1432    unsigned long flags;
1433
1434    lmc_trace(dev, "lmc_start_xmit in");
1435
1436    spin_lock_irqsave(&sc->lmc_lock, flags);
1437
1438    /* normal path, tbusy known to be zero */
1439
1440    entry = sc->lmc_next_tx % LMC_TXDESCS;
1441
1442    sc->lmc_txq[entry] = skb;
1443    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
1444
1445    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
1446
1447#ifndef GCOM
1448    /* If the queue is less than half full, don't interrupt */
1449    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
1450    {
1451        /* Do not interrupt on completion of this packet */
1452        flag = 0x60000000;
1453        netif_wake_queue(dev);
1454    }
1455    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
1456    {
1457        /* This generates an interrupt on completion of this packet */
1458        flag = 0xe0000000;
1459        netif_wake_queue(dev);
1460    }
1461    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
1462    {
1463        /* Do not interrupt on completion of this packet */
1464        flag = 0x60000000;
1465        netif_wake_queue(dev);
1466    }
1467    else
1468    {
1469        /* This generates an interrupt on completion of this packet */
1470        flag = 0xe0000000;
1471        sc->lmc_txfull = 1;
1472        netif_stop_queue(dev);
1473    }
1474#else
1475    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
1476
1477    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1478    {				/* ring full, go busy */
1479        sc->lmc_txfull = 1;
1480	netif_stop_queue(dev);
1481	sc->extra_stats.tx_tbusy1++;
1482        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1483    }
1484#endif
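    /* Net effect (non-GCOM build): a completion interrupt is only requested at
     * the ring's half-way mark and when the ring is about to fill, which keeps
     * the transmit interrupt rate down under normal load.
     */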
1485
1486
1487    if (entry == LMC_TXDESCS - 1)	/* last descriptor in ring */
1488	flag |= LMC_TDES_END_OF_RING;	/* flag as such for Tulip */
1489
1490    /* don't pad small packets either */
1491    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
1492						sc->TxDescriptControlInit;
1493
1494    /* set the transmit timeout flag to be checked in
1495     * the watchdog timer handler. -baz
1496     */
1497
1498    sc->extra_stats.tx_NoCompleteCnt++;
1499    sc->lmc_next_tx++;
1500
1501    /* give ownership to the chip */
1502    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
1503    sc->lmc_txring[entry].status = 0x80000000;
1504
1505    /* send now! */
1506    LMC_CSR_WRITE (sc, csr_txpoll, 0);
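    /* Writing any value to the transmit poll demand CSR makes the 21140 rescan
     * the transmit ring immediately rather than waiting for its next poll.
     */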
1507
1508    spin_unlock_irqrestore(&sc->lmc_lock, flags);
1509
1510    lmc_trace(dev, "lmc_start_xmit_out");
1511    return NETDEV_TX_OK;
1512}
1513
1514
1515static int lmc_rx(struct net_device *dev)
1516{
1517    lmc_softc_t *sc = dev_to_sc(dev);
1518    int i;
1519    int rx_work_limit = LMC_RXDESCS;
1520    unsigned int next_rx;
1521    int rxIntLoopCnt;		/* debug -baz */
1522    int localLengthErrCnt = 0;
1523    long stat;
1524    struct sk_buff *skb, *nsb;
1525    u16 len;
1526
1527    lmc_trace(dev, "lmc_rx in");
1528
1529    lmc_led_on(sc, LMC_DS3_LED3);
1530
1531    rxIntLoopCnt = 0;		/* debug -baz */
1532
1533    i = sc->lmc_next_rx % LMC_RXDESCS;
1534    next_rx = sc->lmc_next_rx;
1535
1536    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
1537    {
1538        rxIntLoopCnt++;		/* debug -baz */
1539        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
1540        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
1541		if ((stat & 0x0000ffff) != 0x7fff) {
1542			/* Oversized frame */
1543			sc->lmc_device->stats.rx_length_errors++;
1544			goto skip_packet;
1545		}
1546	}
1547
1548	if (stat & 0x00000008) { /* Catch a dribbling bit error */
1549		sc->lmc_device->stats.rx_errors++;
1550		sc->lmc_device->stats.rx_frame_errors++;
1551		goto skip_packet;
1552	}
1553
1554
1555	if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
1556		sc->lmc_device->stats.rx_errors++;
1557		sc->lmc_device->stats.rx_crc_errors++;
1558		goto skip_packet;
1559	}
1560
1561	if (len > LMC_PKT_BUF_SZ) {
1562		sc->lmc_device->stats.rx_length_errors++;
1563		localLengthErrCnt++;
1564		goto skip_packet;
1565	}
1566
1567	if (len < sc->lmc_crcSize + 2) {
1568		sc->lmc_device->stats.rx_length_errors++;
1569		sc->extra_stats.rx_SmallPktCnt++;
1570		localLengthErrCnt++;
1571		goto skip_packet;
1572	}
1573
1574        if(stat & 0x00004000){
1575            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
1576        }
1577
1578        len -= sc->lmc_crcSize;
1579
1580        skb = sc->lmc_rxq[i];
1581
1582        /*
1583         * We ran out of memory at some point
 1584         * so just allocate a fresh skb and continue.
1585         */
1586        
1587        if (!skb) {
1588            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1589            if (nsb) {
1590                sc->lmc_rxq[i] = nsb;
1591                nsb->dev = dev;
1592                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1593            }
1594            sc->failed_recv_alloc = 1;
1595            goto skip_packet;
1596        }
1597        
1598	sc->lmc_device->stats.rx_packets++;
1599	sc->lmc_device->stats.rx_bytes += len;
1600
1601        LMC_CONSOLE_LOG("recv", skb->data, len);
1602
1603        /*
1604         * I'm not sure of the sanity of this
1605         * Packets could be arriving at a constant
1606         * 44.210mbits/sec and we're going to copy
1607         * them into a new buffer??
1608         */
1609        
1610        if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
1611            /*
1612             * If it's a large packet don't copy it just hand it up
1613             */
1614        give_it_anyways:
1615
1616            sc->lmc_rxq[i] = NULL;
1617            sc->lmc_rxring[i].buffer1 = 0x0;
1618
1619            skb_put (skb, len);
1620            skb->protocol = lmc_proto_type(sc, skb);
1621            skb_reset_mac_header(skb);
1622            /* skb_reset_network_header(skb); */
1623            skb->dev = dev;
1624            lmc_proto_netif(sc, skb);
1625
1626            /*
1627             * This skb will be destroyed by the upper layers, make a new one
1628             */
1629            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1630            if (nsb) {
1631                sc->lmc_rxq[i] = nsb;
1632                nsb->dev = dev;
1633                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1634                /* Transferred to 21140 below */
1635            }
1636            else {
1637                /*
1638                 * We've run out of memory, stop trying to allocate
1639                 * memory and exit the interrupt handler
1640                 *
1641                 * The chip may run out of receivers and stop
 1642                 * in which case we'll try to allocate the buffer
1643                 * again.  (once a second)
1644                 */
1645		sc->extra_stats.rx_BuffAllocErr++;
1646                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1647                sc->failed_recv_alloc = 1;
1648                goto skip_out_of_mem;
1649            }
1650        }
1651        else {
1652            nsb = dev_alloc_skb(len);
1653            if(!nsb) {
1654                goto give_it_anyways;
1655            }
1656            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
1657            
1658            nsb->protocol = lmc_proto_type(sc, nsb);
1659            skb_reset_mac_header(nsb);
1660            /* skb_reset_network_header(nsb); */
1661            nsb->dev = dev;
1662            lmc_proto_netif(sc, nsb);
1663        }
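        /* Small frames are copied into a right-sized skb and handed up, while
         * the original full-size receive buffer stays on the ring and is given
         * straight back to the chip at skip_packet below.
         */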
1664
1665    skip_packet:
1666        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1667        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
1668
1669        sc->lmc_next_rx++;
1670        i = sc->lmc_next_rx % LMC_RXDESCS;
1671        rx_work_limit--;
1672        if (rx_work_limit < 0)
1673            break;
1674    }
1675
1676    /* detect condition for LMC1000 where DSU cable attaches and fills
1677     * descriptors with bogus packets
1678     *
1679    if (localLengthErrCnt > LMC_RXDESCS - 3) {
1680	sc->extra_stats.rx_BadPktSurgeCnt++;
1681	LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
1682		      sc->extra_stats.rx_BadPktSurgeCnt);
1683    } */
1684
1685    /* save max count of receive descriptors serviced */
1686    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
1687	    sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1688
1689#ifdef DEBUG
1690    if (rxIntLoopCnt == 0)
1691    {
1692        for (i = 0; i < LMC_RXDESCS; i++)
1693        {
1694            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
1695                != DESC_OWNED_BY_DC21X4)
1696            {
1697                rxIntLoopCnt++;
1698            }
1699        }
1700        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
1701    }
1702#endif
1703
1704
1705    lmc_led_off(sc, LMC_DS3_LED3);
1706
1707skip_out_of_mem:
1708
1709    lmc_trace(dev, "lmc_rx out");
1710
1711    return 0;
1712}
1713
1714static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1715{
1716    lmc_softc_t *sc = dev_to_sc(dev);
1717    unsigned long flags;
1718
1719    lmc_trace(dev, "lmc_get_stats in");
1720
1721    spin_lock_irqsave(&sc->lmc_lock, flags);
1722
1723    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1724
1725    spin_unlock_irqrestore(&sc->lmc_lock, flags);
1726
1727    lmc_trace(dev, "lmc_get_stats out");
1728
1729    return &sc->lmc_device->stats;
1730}
1731
1732static struct pci_driver lmc_driver = {
1733	.name		= "lmc",
1734	.id_table	= lmc_pci_tbl,
1735	.probe		= lmc_init_one,
1736	.remove		= __devexit_p(lmc_remove_one),
1737};
1738
1739module_pci_driver(lmc_driver);
1740
1741unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1742{
1743    int i;
1744    int command = (0xf6 << 10) | (devaddr << 5) | regno;
1745    int retval = 0;
1746
1747    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
1748
1749    LMC_MII_SYNC (sc);
1750
1751    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
1752
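        /* Bit-bang the 16-bit read command out through CSR9: the MDIO output
         * bit is 0x20000 and each bit is latched by raising the 0x10000 clock
         * bit.  The reply is then clocked in from bit 0x80000 in the second
         * loop, and the final shift keeps the 16 data bits. */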
1753    for (i = 15; i >= 0; i--)
1754    {
1755        int dataval = (command & (1 << i)) ? 0x20000 : 0;
1756
1757        LMC_CSR_WRITE (sc, csr_9, dataval);
1758        lmc_delay ();
1759        /* __SLOW_DOWN_IO; */
1760        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
1761        lmc_delay ();
1762        /* __SLOW_DOWN_IO; */
1763    }
1764
1765    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
1766
1767    for (i = 19; i > 0; i--)
1768    {
1769        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1770        lmc_delay ();
1771        /* __SLOW_DOWN_IO; */
1772        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
1773        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
1774        lmc_delay ();
1775        /* __SLOW_DOWN_IO; */
1776    }
1777
1778    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
1779
1780    return (retval >> 1) & 0xffff;
1781}
1782
1783void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
1784{
1785    int i = 32;
1786    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
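        /* This packs a standard MII management write frame, shifted out MSB
         * first below: start (01) and write opcode (01) from the top bits of
         * 0x5002, the 5-bit PHY and register addresses, a 10 turnaround and
         * 16 bits of data. */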
1787
1788    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
1789
1790    LMC_MII_SYNC (sc);
1791
1792    i = 31;
1793    while (i >= 0)
1794    {
1795        int datav;
1796
1797        if (command & (1 << i))
1798            datav = 0x20000;
1799        else
1800            datav = 0x00000;
1801
1802        LMC_CSR_WRITE (sc, csr_9, datav);
1803        lmc_delay ();
1804        /* __SLOW_DOWN_IO; */
1805        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
1806        lmc_delay ();
1807        /* __SLOW_DOWN_IO; */
1808        i--;
1809    }
1810
1811    i = 2;
1812    while (i > 0)
1813    {
1814        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1815        lmc_delay ();
1816        /* __SLOW_DOWN_IO; */
1817        LMC_CSR_WRITE (sc, csr_9, 0x50000);
1818        lmc_delay ();
1819        /* __SLOW_DOWN_IO; */
1820        i--;
1821    }
1822
1823    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
1824}
1825
1826static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1827{
1828    int i;
1829
1830    lmc_trace(sc->lmc_device, "lmc_softreset in");
1831
1832    /* Initialize the receive rings and buffers. */
1833    sc->lmc_txfull = 0;
1834    sc->lmc_next_rx = 0;
1835    sc->lmc_next_tx = 0;
1836    sc->lmc_taint_rx = 0;
1837    sc->lmc_taint_tx = 0;
1838
1839    /*
1840     * Setup each one of the receiver buffers
1841     * allocate an skbuff for each one, setup the descriptor table
1842     * and point each buffer at the next one
1843     */
1844
1845    for (i = 0; i < LMC_RXDESCS; i++)
1846    {
1847        struct sk_buff *skb;
1848
1849        if (sc->lmc_rxq[i] == NULL)
1850        {
1851            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1852            if(skb == NULL){
1853                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
1854                sc->failed_ring = 1;
1855                break;
1856            }
1857            else{
1858                sc->lmc_rxq[i] = skb;
1859            }
1860        }
1861        else
1862        {
1863            skb = sc->lmc_rxq[i];
1864        }
1865
1866        skb->dev = sc->lmc_device;
1867
1868        /* owned by 21140 */
1869        sc->lmc_rxring[i].status = 0x80000000;
1870
1871        /* used to be PKT_BUF_SZ; now use the skb's tailroom, since some space is lost to headroom */
1872        sc->lmc_rxring[i].length = skb_tailroom(skb);
1873
1874        /* used to be tail, which looks dumb since you'd wonder why write
1875         * to the end of the packet; but since there's nothing there yet, tail == data
1876         */
1877        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
1878
1879        /* This is fair since the structure is static and we have the next address */
1880        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
1881
1882    }
1883
1884    /*
1885     * Sets end of ring
1886     */
1887    if (i != 0) {
1888        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
1889        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
1890    }
1891    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
1892
1893    /* Initialize the transmit rings and buffers */
1894    for (i = 0; i < LMC_TXDESCS; i++)
1895    {
1896        if (sc->lmc_txq[i] != NULL){		/* have buffer */
1897            dev_kfree_skb(sc->lmc_txq[i]);	/* free it */
1898	    sc->lmc_device->stats.tx_dropped++;	/* We just dropped a packet */
1899        }
1900        sc->lmc_txq[i] = NULL;
1901        sc->lmc_txring[i].status = 0x00000000;
1902        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
1903    }
1904    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
1905    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
1906
1907    lmc_trace(sc->lmc_device, "lmc_softreset out");
1908}
1909
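    /* GPIO direction helpers: sc->lmc_gpio_io caches which general-purpose
     * pins are driven as outputs.  Writing csr_gp with TULIP_GP_PINSET set
     * programs the 21140 pin directions rather than the pin values. */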
1910void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1911{
1912    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1913    sc->lmc_gpio_io &= ~bits;
1914    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1915    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1916}
1917
1918void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1919{
1920    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1921    sc->lmc_gpio_io |= bits;
1922    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1923    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1924}
1925
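    /* The LED bits in MII register 16 are active low: lmc_led_on() clears the
     * requested bit(s) and lmc_led_off() sets them, and both skip the MII
     * write when the LED is already in the requested state. */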
1926void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
1927{
1928    lmc_trace(sc->lmc_device, "lmc_led_on in");
1929    if((~sc->lmc_miireg16) & led){ /* Already on! */
1930        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
1931        return;
1932    }
1933    
1934    sc->lmc_miireg16 &= ~led;
1935    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1936    lmc_trace(sc->lmc_device, "lmc_led_on out");
1937}
1938
1939void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
1940{
1941    lmc_trace(sc->lmc_device, "lmc_led_off in");
1942    if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
1943        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
1944        return;
1945    }
1946    
1947    sc->lmc_miireg16 |= led;
1948    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1949    lmc_trace(sc->lmc_device, "lmc_led_off out");
1950}
1951
1952static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
1953{
1954    lmc_trace(sc->lmc_device, "lmc_reset in");
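        /* Pulse the FIFO reset bit in MII register 16: set it, then clear it. */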
1955    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
1956    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1957
1958    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
1959    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1960
1961    /*
1962     * make some of the GPIO pins be outputs
1963     */
1964    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
1965
1966    /*
1967     * RESET low to force state reset.  This also forces
1968     * the transmitter clock to be internal, but we expect to reset
1969     * that later anyway.
1970     */
1971    sc->lmc_gpio &= ~(LMC_GEP_RESET);
1972    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
1973
1974    /*
1975     * hold for more than 10 microseconds
1976     */
1977    udelay(50);
1978
1979    /*
1980     * stop driving Xilinx-related signals
1981     */
1982    lmc_gpio_mkinput(sc, LMC_GEP_RESET);
1983
1984    /*
1985     * Call media specific init routine
1986     */
1987    sc->lmc_media->init(sc);
1988
1989    sc->extra_stats.resetCount++;
1990    lmc_trace(sc->lmc_device, "lmc_reset out");
1991}
1992
1993static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
1994{
1995    u32 val;
1996    lmc_trace(sc->lmc_device, "lmc_dec_reset in");
1997
1998    /*
1999     * disable all interrupts
2000     */
2001    sc->lmc_intrmask = 0;
2002    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
2003
2004    /*
2005     * Reset the chip with a software reset command.
2006     * Wait 10 microseconds (actually 50 PCI cycles, which at
2007     * 33 MHz is about 1.5 microseconds, but wait a
2008     * bit longer anyway)
2009     */
2010    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
2011    udelay(25);
2012#ifdef __sparc__
2013    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
2014    sc->lmc_busmode = 0x00100000;
2015    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
2016    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
2017#endif
2018    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
2019
2020    /*
2021     * We want:
2022     *   no ethernet address in frames we write
2023     *   disable padding (txdesc, padding disable)
2024     *   ignore runt frames (rdes0 bit 15)
2025     *   no receiver watchdog or transmitter jabber timer
2026     *       (csr15 bit 0,14 == 1)
2027     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
2028     */
2029
2030    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
2031                         | TULIP_CMD_FULLDUPLEX
2032                         | TULIP_CMD_PASSBADPKT
2033                         | TULIP_CMD_NOHEARTBEAT
2034                         | TULIP_CMD_PORTSELECT
2035                         | TULIP_CMD_RECEIVEALL
2036                         | TULIP_CMD_MUSTBEONE
2037                       );
2038    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
2039                          | TULIP_CMD_THRESHOLDCTL
2040                          | TULIP_CMD_STOREFWD
2041                          | TULIP_CMD_TXTHRSHLDCTL
2042                        );
2043
2044    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
2045
2046    /*
2047     * disable receiver watchdog and transmit jabber
2048     */
2049    val = LMC_CSR_READ(sc, csr_sia_general);
2050    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
2051    LMC_CSR_WRITE(sc, csr_sia_general, val);
2052
2053    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
2054}
2055
2056static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
2057                         size_t csr_size)
2058{
2059    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
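        /* The sixteen 21140 CSRs are evenly spaced, csr_size bytes apart,
         * starting at csr_base; lmc_init_one() passes the PCI base address
         * and a spacing of 8. */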
2060    sc->lmc_csrs.csr_busmode	        = csr_base +  0 * csr_size;
2061    sc->lmc_csrs.csr_txpoll		= csr_base +  1 * csr_size;
2062    sc->lmc_csrs.csr_rxpoll		= csr_base +  2 * csr_size;
2063    sc->lmc_csrs.csr_rxlist		= csr_base +  3 * csr_size;
2064    sc->lmc_csrs.csr_txlist		= csr_base +  4 * csr_size;
2065    sc->lmc_csrs.csr_status		= csr_base +  5 * csr_size;
2066    sc->lmc_csrs.csr_command	        = csr_base +  6 * csr_size;
2067    sc->lmc_csrs.csr_intr		= csr_base +  7 * csr_size;
2068    sc->lmc_csrs.csr_missed_frames	= csr_base +  8 * csr_size;
2069    sc->lmc_csrs.csr_9		        = csr_base +  9 * csr_size;
2070    sc->lmc_csrs.csr_10		        = csr_base + 10 * csr_size;
2071    sc->lmc_csrs.csr_11		        = csr_base + 11 * csr_size;
2072    sc->lmc_csrs.csr_12		        = csr_base + 12 * csr_size;
2073    sc->lmc_csrs.csr_13		        = csr_base + 13 * csr_size;
2074    sc->lmc_csrs.csr_14		        = csr_base + 14 * csr_size;
2075    sc->lmc_csrs.csr_15		        = csr_base + 15 * csr_size;
2076    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
2077}
2078
2079static void lmc_driver_timeout(struct net_device *dev)
2080{
2081    lmc_softc_t *sc = dev_to_sc(dev);
2082    u32 csr6;
2083    unsigned long flags;
2084
2085    lmc_trace(dev, "lmc_driver_timeout in");
2086
2087    spin_lock_irqsave(&sc->lmc_lock, flags);
2088
2089    printk("%s: Xmitter busy\n", dev->name);
2090
2091    sc->extra_stats.tx_tbusy_calls++;
2092    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
2093	    goto bug_out;
2094
2095    /*
2096     * Chip seems to have locked up
2097     * Reset it
2098     * This wipes out all our descriptor
2099     * tables and starts from scratch
2100     */
2101
2102    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
2103                  LMC_CSR_READ (sc, csr_status),
2104		  sc->extra_stats.tx_ProcTimeout);
2105
2106    lmc_running_reset (dev);
2107
2108    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
2109    LMC_EVENT_LOG(LMC_EVENT_RESET2,
2110                  lmc_mii_readreg (sc, 0, 16),
2111                  lmc_mii_readreg (sc, 0, 17));
2112
2113    /* restart the tx processes */
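        /* Presumably the 21140 CSR6 start bits: 0x0002 (receive) first,
         * then 0x2002 (receive + transmit). */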
2114    csr6 = LMC_CSR_READ (sc, csr_command);
2115    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
2116    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
2117
2118    /* immediate transmit */
2119    LMC_CSR_WRITE (sc, csr_txpoll, 0);
2120
2121    sc->lmc_device->stats.tx_errors++;
2122    sc->extra_stats.tx_ProcTimeout++; /* -baz */
2123
2124    dev->trans_start = jiffies; /* prevent tx timeout */
2125
2126bug_out:
2127
2128    spin_unlock_irqrestore(&sc->lmc_lock, flags);
2129
2130    lmc_trace(dev, "lmc_driver_timeout out");
2131
2132
2133}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
   4  * All rights reserved.  www.lanmedia.com
   5  * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
   6  *
   7  * This code is written by:
   8  * Andrew Stanley-Jones (asj@cban.com)
   9  * Rob Braun (bbraun@vix.com),
  10  * Michael Graff (explorer@vix.com) and
  11  * Matt Thomas (matt@3am-software.com).
  12  *
  13  * With Help By:
  14  * David Boggs
  15  * Ron Crane
  16  * Alan Cox
  17  *
  18  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
  19  *
  20  * To control link specific options lmcctl is required.
  21  * It can be obtained from ftp.lanmedia.com.
  22  *
  23  * Linux driver notes:
  24  * Linux uses the device struct lmc_private to pass private information
  25  * around.
  26  *
  27  * The initialization portion of this driver (the lmc_reset() and the
  28  * lmc_dec_reset() functions, as well as the led controls and the
  29  * lmc_initcsrs() functions.
  30  *
  31  * The watchdog function runs every second and checks to see if
  32  * we still have link, and that the timing source is what we expected
  33  * it to be.  If link is lost, the interface is marked down, and
  34  * we no longer can transmit.
  35  */
  36
  37#include <linux/kernel.h>
  38#include <linux/module.h>
  39#include <linux/string.h>
  40#include <linux/timer.h>
  41#include <linux/ptrace.h>
  42#include <linux/errno.h>
  43#include <linux/ioport.h>
  44#include <linux/slab.h>
  45#include <linux/interrupt.h>
  46#include <linux/pci.h>
  47#include <linux/delay.h>
  48#include <linux/hdlc.h>
  49#include <linux/in.h>
  50#include <linux/if_arp.h>
  51#include <linux/netdevice.h>
  52#include <linux/etherdevice.h>
  53#include <linux/skbuff.h>
  54#include <linux/inet.h>
  55#include <linux/bitops.h>
  56#include <asm/processor.h>             /* Processor type for cache alignment. */
  57#include <asm/io.h>
  58#include <asm/dma.h>
  59#include <linux/uaccess.h>
  60//#include <asm/spinlock.h>
  61
  62#define DRIVER_MAJOR_VERSION     1
  63#define DRIVER_MINOR_VERSION    34
  64#define DRIVER_SUB_VERSION       0
  65
  66#define DRIVER_VERSION  ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
  67
  68#include "lmc.h"
  69#include "lmc_var.h"
  70#include "lmc_ioctl.h"
  71#include "lmc_debug.h"
  72#include "lmc_proto.h"
  73
  74static int LMC_PKT_BUF_SZ = 1542;
  75
  76static const struct pci_device_id lmc_pci_tbl[] = {
  77	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
  78	  PCI_VENDOR_ID_LMC, PCI_ANY_ID },
  79	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
  80	  PCI_ANY_ID, PCI_VENDOR_ID_LMC },
  81	{ 0 }
  82};
  83
  84MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
  85MODULE_LICENSE("GPL v2");
  86
  87
  88static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
  89					struct net_device *dev);
  90static int lmc_rx (struct net_device *dev);
  91static int lmc_open(struct net_device *dev);
  92static int lmc_close(struct net_device *dev);
  93static struct net_device_stats *lmc_get_stats(struct net_device *dev);
  94static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
  95static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
  96static void lmc_softreset(lmc_softc_t * const);
  97static void lmc_running_reset(struct net_device *dev);
  98static int lmc_ifdown(struct net_device * const);
  99static void lmc_watchdog(struct timer_list *t);
 100static void lmc_reset(lmc_softc_t * const sc);
 101static void lmc_dec_reset(lmc_softc_t * const sc);
 102static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);
 103
 104/*
 105 * linux reserves 16 device specific IOCTLs.  We call them
 106 * LMCIOC* to control various bits of our world.
 107 */
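    /*
     * Illustrative only, not part of the driver: a userspace caller would
     * typically reach these handlers through the private-ioctl path, roughly:
     *
     *     struct ifreq ifr;          // <net/if.h>, plus lmc_ioctl.h for LMCIOC*
     *     lmc_ctl_t ctl;
     *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
     *     strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);   // interface name assumed
     *     ifr.ifr_data = (char *)&ctl;
     *     ioctl(fd, LMCIOCGINFO, &ifr);
     *
     * The LMCIOC* numbers and the lmc_ctl_t layout come from lmc_ioctl.h and
     * lmc_var.h; the interface name above is just an example.
     */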
 108int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 109{
 110    lmc_softc_t *sc = dev_to_sc(dev);
 111    lmc_ctl_t ctl;
 112    int ret = -EOPNOTSUPP;
 113    u16 regVal;
 114    unsigned long flags;
 115
 116    lmc_trace(dev, "lmc_ioctl in");
 117
 118    /*
 119     * Most functions mess with the structure
 120     * Disable interrupts while we do the polling
 121     */
 122
 123    switch (cmd) {
 124        /*
 125         * Return current driver state.  Since we keep this up
 126         * to date internally, just copy it out to the user.
 127         */
 128    case LMCIOCGINFO: /*fold01*/
 129	if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
 130		ret = -EFAULT;
 131	else
 132		ret = 0;
 133        break;
 134
 135    case LMCIOCSINFO: /*fold01*/
 136        if (!capable(CAP_NET_ADMIN)) {
 137            ret = -EPERM;
 138            break;
 139        }
 140
 141        if(dev->flags & IFF_UP){
 142            ret = -EBUSY;
 143            break;
 144        }
 145
 146	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
 147		ret = -EFAULT;
 148		break;
 149	}
 150
 151	spin_lock_irqsave(&sc->lmc_lock, flags);
 152        sc->lmc_media->set_status (sc, &ctl);
 153
 154        if(ctl.crc_length != sc->ictl.crc_length) {
 155            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
 156	    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
 157		sc->TxDescriptControlInit |=  LMC_TDES_ADD_CRC_DISABLE;
 158	    else
 159		sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
 160        }
 161	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 162
 163        ret = 0;
 164        break;
 165
 166    case LMCIOCIFTYPE: /*fold01*/
 167        {
 168	    u16 old_type = sc->if_type;
 169	    u16	new_type;
 170
 171	    if (!capable(CAP_NET_ADMIN)) {
 172		ret = -EPERM;
 173		break;
 174	    }
 175
 176	    if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
 177		ret = -EFAULT;
 178		break;
 179	    }
 180
 181            
 182	    if (new_type == old_type)
 183	    {
 184		ret = 0 ;
 185		break;				/* no change */
 186            }
 187            
 188	    spin_lock_irqsave(&sc->lmc_lock, flags);
 189            lmc_proto_close(sc);
 190
 191            sc->if_type = new_type;
 192            lmc_proto_attach(sc);
 193	    ret = lmc_proto_open(sc);
 194	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 195	    break;
 196	}
 197
 198    case LMCIOCGETXINFO: /*fold01*/
 199	spin_lock_irqsave(&sc->lmc_lock, flags);
 200        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
 201
 202        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
 203        sc->lmc_xinfo.PciSlotNumber = 0;
 204        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
 205        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
 206        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
 207        sc->lmc_xinfo.XilinxRevisionNumber =
 208            lmc_mii_readreg (sc, 0, 3) & 0xf;
 209        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
 210        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
 211        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
 212	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 213
 214        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
 215
 216        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
 217			 sizeof(struct lmc_xinfo)))
 218		ret = -EFAULT;
 219	else
 220		ret = 0;
 221
 222        break;
 223
 224    case LMCIOCGETLMCSTATS:
 225	    spin_lock_irqsave(&sc->lmc_lock, flags);
 226	    if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
 227		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
 228		    sc->extra_stats.framingBitErrorCount +=
 229			    lmc_mii_readreg(sc, 0, 18) & 0xff;
 230		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
 231		    sc->extra_stats.framingBitErrorCount +=
 232			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
 233		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
 234		    sc->extra_stats.lineCodeViolationCount +=
 235			    lmc_mii_readreg(sc, 0, 18) & 0xff;
 236		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
 237		    sc->extra_stats.lineCodeViolationCount +=
 238			    (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
 239		    lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
 240		    regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
 241
 242		    sc->extra_stats.lossOfFrameCount +=
 243			    (regVal & T1FRAMER_LOF_MASK) >> 4;
 244		    sc->extra_stats.changeOfFrameAlignmentCount +=
 245			    (regVal & T1FRAMER_COFA_MASK) >> 2;
 246		    sc->extra_stats.severelyErroredFrameCount +=
 247			    regVal & T1FRAMER_SEF_MASK;
 248	    }
 249	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 250	    if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
 251			     sizeof(sc->lmc_device->stats)) ||
 252		copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
 253			     &sc->extra_stats, sizeof(sc->extra_stats)))
 254		    ret = -EFAULT;
 255	    else
 256		    ret = 0;
 257	    break;
 258
 259    case LMCIOCCLEARLMCSTATS:
 260	    if (!capable(CAP_NET_ADMIN)) {
 261		    ret = -EPERM;
 262		    break;
 263	    }
 264
 265	    spin_lock_irqsave(&sc->lmc_lock, flags);
 266	    memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
 267	    memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
 268	    sc->extra_stats.check = STATCHECK;
 269	    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 270		    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 271	    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 272	    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 273	    ret = 0;
 274	    break;
 275
 276    case LMCIOCSETCIRCUIT: /*fold01*/
 277        if (!capable(CAP_NET_ADMIN)){
 278            ret = -EPERM;
 279            break;
 280        }
 281
 282        if(dev->flags & IFF_UP){
 283            ret = -EBUSY;
 284            break;
 285        }
 286
 287	if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
 288		ret = -EFAULT;
 289		break;
 290	}
 291	spin_lock_irqsave(&sc->lmc_lock, flags);
 292        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
 293        sc->ictl.circuit_type = ctl.circuit_type;
 294	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 295        ret = 0;
 296
 297        break;
 298
 299    case LMCIOCRESET: /*fold01*/
 300        if (!capable(CAP_NET_ADMIN)){
 301            ret = -EPERM;
 302            break;
 303        }
 304
 305	spin_lock_irqsave(&sc->lmc_lock, flags);
 306        /* Reset driver and bring back to current state */
 307        printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
 308        lmc_running_reset (dev);
 309        printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
 310
 311        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
 312	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 313
 314        ret = 0;
 315        break;
 316
 317#ifdef DEBUG
 318    case LMCIOCDUMPEVENTLOG:
 319	if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
 320		ret = -EFAULT;
 321		break;
 322	}
 323	if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
 324			 sizeof(lmcEventLogBuf)))
 325		ret = -EFAULT;
 326	else
 327		ret = 0;
 328
 329        break;
 330#endif /* end ifdef _DBG_EVENTLOG */
 331    case LMCIOCT1CONTROL: /*fold01*/
 332        if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
 333            ret = -EOPNOTSUPP;
 334            break;
 335        }
 336        break;
 337    case LMCIOCXILINX: /*fold01*/
 338        {
 339            struct lmc_xilinx_control xc; /*fold02*/
 340
 341            if (!capable(CAP_NET_ADMIN)){
 342                ret = -EPERM;
 343                break;
 344            }
 345
 346            /*
 347             * Stop the transmitter while we restart the hardware
 348             */
 349            netif_stop_queue(dev);
 350
 351	    if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
 352		ret = -EFAULT;
 353		break;
 354	    }
 355            switch(xc.command){
 356            case lmc_xilinx_reset: /*fold02*/
 357                {
 358                    u16 mii;
 359		    spin_lock_irqsave(&sc->lmc_lock, flags);
 360                    mii = lmc_mii_readreg (sc, 0, 16);
 361
 362                    /*
 363                     * Make all of them 0 and make input
 364                     */
 365                    lmc_gpio_mkinput(sc, 0xff);
 366
 367                    /*
 368                     * make the reset output
 369                     */
 370                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
 371
 372                    /*
 373                     * RESET low to force configuration.  This also forces
 374                     * the transmitter clock to be internal, but we expect to reset
 375                     * that later anyway.
 376                     */
 377
 378                    sc->lmc_gpio &= ~LMC_GEP_RESET;
 379                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 380
 381
 382                    /*
 383                     * hold for more than 10 microseconds
 384                     */
 385                    udelay(50);
 386
 387                    sc->lmc_gpio |= LMC_GEP_RESET;
 388                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 389
 390
 391                    /*
 392                     * stop driving Xilinx-related signals
 393                     */
 394                    lmc_gpio_mkinput(sc, 0xff);
 395
 396                    /* Reset the framer hardware */
 397                    sc->lmc_media->set_link_status (sc, 1);
 398                    sc->lmc_media->set_status (sc, NULL);
 399//                    lmc_softreset(sc);
 400
 401                    {
 402                        int i;
 403                        for(i = 0; i < 5; i++){
 404                            lmc_led_on(sc, LMC_DS3_LED0);
 405                            mdelay(100);
 406                            lmc_led_off(sc, LMC_DS3_LED0);
 407                            lmc_led_on(sc, LMC_DS3_LED1);
 408                            mdelay(100);
 409                            lmc_led_off(sc, LMC_DS3_LED1);
 410                            lmc_led_on(sc, LMC_DS3_LED3);
 411                            mdelay(100);
 412                            lmc_led_off(sc, LMC_DS3_LED3);
 413                            lmc_led_on(sc, LMC_DS3_LED2);
 414                            mdelay(100);
 415                            lmc_led_off(sc, LMC_DS3_LED2);
 416                        }
 417                    }
 418		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 419                    
 420                    
 421
 422                    ret = 0x0;
 423
 424                }
 425
 426                break;
 427            case lmc_xilinx_load_prom: /*fold02*/
 428                {
 429                    u16 mii;
 430                    int timeout = 500000;
 431		    spin_lock_irqsave(&sc->lmc_lock, flags);
 432                    mii = lmc_mii_readreg (sc, 0, 16);
 433
 434                    /*
 435                     * Make all of them 0 and make input
 436                     */
 437                    lmc_gpio_mkinput(sc, 0xff);
 438
 439                    /*
 440                     * make the reset output
 441                     */
 442                    lmc_gpio_mkoutput(sc,  LMC_GEP_DP | LMC_GEP_RESET);
 443
 444                    /*
 445                     * RESET low to force configuration.  This also forces
 446                     * the transmitter clock to be internal, but we expect to reset
 447                     * that later anyway.
 448                     */
 449
 450                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
 451                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 452
 453
 454                    /*
 455                     * hold for more than 10 microseconds
 456                     */
 457                    udelay(50);
 458
 459                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
 460                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 461
 462                    /*
 463                     * busy wait for the chip to reset
 464                     */
 465                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
 466                           (timeout-- > 0))
 467                        cpu_relax();
 468
 469
 470                    /*
 471                     * stop driving Xilinx-related signals
 472                     */
 473                    lmc_gpio_mkinput(sc, 0xff);
 474		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 475
 476                    ret = 0x0;
 477                    
 478
 479                    break;
 480
 481                }
 482
 483            case lmc_xilinx_load: /*fold02*/
 484                {
 485                    char *data;
 486                    int pos;
 487                    int timeout = 500000;
 488
 489                    if (!xc.data) {
 490                            ret = -EINVAL;
 491                            break;
 492                    }
 493
 494                    data = memdup_user(xc.data, xc.len);
 495                    if (IS_ERR(data)) {
 496                            ret = PTR_ERR(data);
 497                            break;
 498                    }
 499
 500                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
 501
 502		    spin_lock_irqsave(&sc->lmc_lock, flags);
 503                    lmc_gpio_mkinput(sc, 0xff);
 504
 505                    /*
 506                     * Clear the Xilinx and start programming from the DEC
 507                     */
 508
 509                    /*
 510                     * Set output as:
 511                     * Reset: 0 (active)
 512                     * DP:    0 (active)
 513                     * Mode:  1
 514                     *
 515                     */
 516                    sc->lmc_gpio = 0x00;
 517                    sc->lmc_gpio &= ~LMC_GEP_DP;
 518                    sc->lmc_gpio &= ~LMC_GEP_RESET;
 519                    sc->lmc_gpio |=  LMC_GEP_MODE;
 520                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 521
 522                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
 523
 524                    /*
 525                     * Wait at least 10 us (20 to be safe)
 526                     */
 527                    udelay(50);
 528
 529                    /*
 530                     * Clear reset and activate programming lines
 531                     * Reset: Input
 532                     * DP:    Input
 533                     * Clock: Output
 534                     * Data:  Output
 535                     * Mode:  Output
 536                     */
 537                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
 538
 539                    /*
 540                     * Set LOAD, DATA, Clock to 1
 541                     */
 542                    sc->lmc_gpio = 0x00;
 543                    sc->lmc_gpio |= LMC_GEP_MODE;
 544                    sc->lmc_gpio |= LMC_GEP_DATA;
 545                    sc->lmc_gpio |= LMC_GEP_CLK;
 546                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 547                    
 548                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
 549
 550                    /*
 551                     * busy wait for the chip to reset
 552                     */
 553                    while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
 554                           (timeout-- > 0))
 555                        cpu_relax();
 556
 557                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
 558
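                        /* Shift the bitstream out one array element per
                         * configuration bit: present the bit on LMC_GEP_DATA,
                         * then pulse LMC_GEP_CLK low -> high with LMC_GEP_MODE
                         * held asserted throughout. */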
 559                    for(pos = 0; pos < xc.len; pos++){
 560                        switch(data[pos]){
 561                        case 0:
 562                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
 563                            break;
 564                        case 1:
 565                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
 566                            break;
 567                        default:
 568                            printk(KERN_WARNING "%s: Bad data in Xilinx programming data at %d, got %d, wanted 0 or 1\n", dev->name, pos, data[pos]);
 569                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
 570                        }
 571                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
 572                        sc->lmc_gpio |= LMC_GEP_MODE;
 573                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 574                        udelay(1);
 575                        
 576                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
 577                        sc->lmc_gpio |= LMC_GEP_MODE;
 578                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
 579                        udelay(1);
 580                    }
 581                    if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
 582                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
 583                    }
 584                    else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
 585                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
 586                    }
 587                    else {
 588                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
 589                    }
 590
 591                    lmc_gpio_mkinput(sc, 0xff);
 592                    
 593                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
 594                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
 595
 596                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
 597                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
 598		    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 599
 600                    kfree(data);
 601                    
 602                    ret = 0;
 603                    
 604                    break;
 605                }
 606            default: /*fold02*/
 607                ret = -EBADE;
 608                break;
 609            }
 610
 611            netif_wake_queue(dev);
 612            sc->lmc_txfull = 0;
 613
 614        }
 615        break;
 616    default: /*fold01*/
 617        /* If we don't know what to do, give the protocol a shot. */
 618        ret = lmc_proto_ioctl (sc, ifr, cmd);
 619        break;
 620    }
 621
 622    lmc_trace(dev, "lmc_ioctl out");
 623
 624    return ret;
 625}
 626
 627
 628/* the watchdog process that cruises around */
 629static void lmc_watchdog(struct timer_list *t) /*fold00*/
 630{
 631    lmc_softc_t *sc = from_timer(sc, t, timer);
 632    struct net_device *dev = sc->lmc_device;
 633    int link_status;
 634    u32 ticks;
 635    unsigned long flags;
 636
 637    lmc_trace(dev, "lmc_watchdog in");
 638
 639    spin_lock_irqsave(&sc->lmc_lock, flags);
 640
 641    if(sc->check != 0xBEAFCAFE){
 642        printk("LMC: Corrupt net_device struct, breaking out\n");
 643	spin_unlock_irqrestore(&sc->lmc_lock, flags);
 644        return;
 645    }
 646
 647
 648    /* Make sure the tx jabber and rx watchdog are off,
 649     * and the transmit and receive processes are running.
 650     */
 651
 652    LMC_CSR_WRITE (sc, csr_15, 0x00000011);
 653    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
 654    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
 655
 656    if (sc->lmc_ok == 0)
 657        goto kick_timer;
 658
 659    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
 660
 661    /* --- begin time out check -----------------------------------
 662     * check for a transmit interrupt timeout:
 663     * has the packets-transmitted vs. descriptors-serviced threshold been exceeded? */
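    /* Two-pass detector: the first watchdog pass that sees tx_packets
     * advancing while lmc_taint_tx has not moved only arms tx_TimeoutInd;
     * if the same condition still holds a second later, the chip is
     * assumed stuck and a running reset is issued. */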
 664    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
 665	sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
 666	sc->tx_TimeoutInd == 0)
 667    {
 668
 669        /* wait for the watchdog to come around again */
 670        sc->tx_TimeoutInd = 1;
 671    }
 672    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
 673	     sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
 674	     sc->tx_TimeoutInd)
 675    {
 676
 677        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
 678
 679        sc->tx_TimeoutDisplay = 1;
 680	sc->extra_stats.tx_TimeoutCnt++;
 681
 682        /* DEC chip is stuck, hit it with a RESET!!!! */
 683        lmc_running_reset (dev);
 684
 685
 686        /* look at receive & transmit process state to make sure they are running */
 687        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
 688
 689        /* look at: DSR - 02  for Reg 16
 690         *                  CTS - 08
 691         *                  DCD - 10
 692         *                  RI  - 20
 693         * for Reg 17
 694         */
 695        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
 696
 697        /* reset the transmit timeout detection flag */
 698        sc->tx_TimeoutInd = 0;
 699        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
 700	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
 701    } else {
 702        sc->tx_TimeoutInd = 0;
 703        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
 704	sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
 705    }
 706
 707    /* --- end time out check ----------------------------------- */
 708
 709
 710    link_status = sc->lmc_media->get_link_status (sc);
 711
 712    /*
 713     * hardware level link lost, but the interface is marked as up.
 714     * Mark it as down.
 715     */
 716    if ((link_status == 0) && (sc->last_link_status != 0)) {
 717        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
 718        sc->last_link_status = 0;
 719        /* lmc_reset (sc); Why reset??? The link can go down ok */
 720
 721        /* Inform the world that link has been lost */
 722	netif_carrier_off(dev);
 723    }
 724
 725    /*
 726     * hardware link is up, but the interface is marked as down.
 727     * Bring it back up again.
 728     */
 729     if (link_status != 0 && sc->last_link_status == 0) {
 730         printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
 731         sc->last_link_status = 1;
 732         /* lmc_reset (sc); Again why reset??? */
 733
 734	 netif_carrier_on(dev);
 735     }
 736
 737    /* Call media specific watchdog functions */
 738    sc->lmc_media->watchdog(sc);
 739
 740    /*
 741     * Poke the receive poll demand register so reception
 742     * never stops, even if we ran out of memory
 743     */
 744    LMC_CSR_WRITE(sc, csr_rxpoll, 0);
 745
 746    /*
 747     * Check for things that failed earlier
 748     * and try to fix them as appropriate
 749     */
 750    if(sc->failed_ring == 1){
 751        /*
 752         * Failed to set up the recv/xmit rings;
 753         * try again
 754         */
 755        sc->failed_ring = 0;
 756        lmc_softreset(sc);
 757    }
 758    if(sc->failed_recv_alloc == 1){
 759        /*
 760         * We failed to alloc mem in the
 761         * interrupt handler, go through the rings
 762         * and rebuild them
 763         */
 764        sc->failed_recv_alloc = 0;
 765        lmc_softreset(sc);
 766    }
 767
 768
 769    /*
 770     * remember the timer value
 771     */
 772kick_timer:
 773
 774    ticks = LMC_CSR_READ (sc, csr_gp_timer);
 775    LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
 776    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
 777
 778    /*
 779     * restart this timer.
 780     */
 781    sc->timer.expires = jiffies + (HZ);
 782    add_timer (&sc->timer);
 783
 784    spin_unlock_irqrestore(&sc->lmc_lock, flags);
 785
 786    lmc_trace(dev, "lmc_watchdog out");
 787
 788}
 789
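    /* Generic HDLC attach hook: only NRZ encoding with 16-bit CCITT CRC
     * (PARITY_CRC16_PR1_CCITT) is accepted here; anything else is rejected
     * with -EINVAL. */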
 790static int lmc_attach(struct net_device *dev, unsigned short encoding,
 791		      unsigned short parity)
 792{
 793	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
 794		return 0;
 795	return -EINVAL;
 796}
 797
 798static const struct net_device_ops lmc_ops = {
 799	.ndo_open       = lmc_open,
 800	.ndo_stop       = lmc_close,
 801	.ndo_start_xmit = hdlc_start_xmit,
 802	.ndo_do_ioctl   = lmc_ioctl,
 803	.ndo_tx_timeout = lmc_driver_timeout,
 804	.ndo_get_stats  = lmc_get_stats,
 805};
 806
 807static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 808{
 809	lmc_softc_t *sc;
 810	struct net_device *dev;
 811	u16 subdevice;
 812	u16 AdapModelNum;
 813	int err;
 814	static int cards_found;
 815
 816	/* lmc_trace(dev, "lmc_init_one in"); */
 817
 818	err = pcim_enable_device(pdev);
 819	if (err) {
 820		printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
 821		return err;
 822	}
 823
 824	err = pci_request_regions(pdev, "lmc");
 825	if (err) {
 826		printk(KERN_ERR "lmc: pci_request_region failed\n");
 827		return err;
 828	}
 829
 830	/*
 831	 * Allocate our own device structure
 832	 */
 833	sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
 834	if (!sc)
 835		return -ENOMEM;
 836
 837	dev = alloc_hdlcdev(sc);
 838	if (!dev) {
 839		printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
 840		return -ENOMEM;
 841	}
 842
 843
 844	dev->type = ARPHRD_HDLC;
 845	dev_to_hdlc(dev)->xmit = lmc_start_xmit;
 846	dev_to_hdlc(dev)->attach = lmc_attach;
 847	dev->netdev_ops = &lmc_ops;
 848	dev->watchdog_timeo = HZ; /* 1 second */
 849	dev->tx_queue_len = 100;
 850	sc->lmc_device = dev;
 851	sc->name = dev->name;
 852	sc->if_type = LMC_PPP;
 853	sc->check = 0xBEAFCAFE;
 854	dev->base_addr = pci_resource_start(pdev, 0);
 855	dev->irq = pdev->irq;
 856	pci_set_drvdata(pdev, dev);
 857	SET_NETDEV_DEV(dev, &pdev->dev);
 858
 859	/*
 860	 * This will get the protocol layer ready and do any 1 time init's
 861	 * Must have a valid sc and dev structure
 862	 */
 863	lmc_proto_attach(sc);
 864
 865	/* Init the spin lock so we can use it later */
 866
 867	spin_lock_init(&sc->lmc_lock);
 868	pci_set_master(pdev);
 869
 870	printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
 871	       dev->base_addr, dev->irq);
 872
 873	err = register_hdlc_device(dev);
 874	if (err) {
 875		printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
 876		free_netdev(dev);
 877		return err;
 878	}
 879
 880    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
 881    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
 882
 883    /*
 884     *
 885     * Check either the subvendor or the subdevice: some systems reverse
 886     * the two settings in the BIOS (seems to be version and arch dependent).
 887     * Fix the error by exchanging the two values.
 888     */
 889    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
 890	    subdevice = pdev->subsystem_vendor;
 891
 892    switch (subdevice) {
 893    case PCI_DEVICE_ID_LMC_HSSI:
 894	printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
 895        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
 896        sc->lmc_media = &lmc_hssi_media;
 897        break;
 898    case PCI_DEVICE_ID_LMC_DS3:
 899	printk(KERN_INFO "%s: LMC DS3\n", dev->name);
 900        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
 901        sc->lmc_media = &lmc_ds3_media;
 902        break;
 903    case PCI_DEVICE_ID_LMC_SSI:
 904	printk(KERN_INFO "%s: LMC SSI\n", dev->name);
 905        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
 906        sc->lmc_media = &lmc_ssi_media;
 907        break;
 908    case PCI_DEVICE_ID_LMC_T1:
 909	printk(KERN_INFO "%s: LMC T1\n", dev->name);
 910        sc->lmc_cardtype = LMC_CARDTYPE_T1;
 911        sc->lmc_media = &lmc_t1_media;
 912        break;
 913    default:
 914	printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
 915        break;
 916    }
 917
 918    lmc_initcsrs (sc, dev->base_addr, 8);
 919
 920    lmc_gpio_mkinput (sc, 0xff);
 921    sc->lmc_gpio = 0;		/* drive no signals yet */
 922
 923    sc->lmc_media->defaults (sc);
 924
 925    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
 926
 927    /* verify that the PCI Sub System ID matches the Adapter Model number
 928     * from the MII register
 929     */
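        /* Bits 9:4 of MII register 3 encode the adapter model; warn below if
         * it does not agree with the PCI subsystem ID for any of the four
         * known card types. */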
 930    AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
 931
 932    if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
 933	 subdevice != PCI_DEVICE_ID_LMC_T1) &&
 934	(AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
 935	 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
 936	(AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
 937	 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
 938	(AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
 939	 subdevice != PCI_DEVICE_ID_LMC_HSSI))
 940	    printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
 941		   " Subsystem ID = 0x%04x\n",
 942		   dev->name, AdapModelNum, subdevice);
 943
 944    /*
 945     * reset clock
 946     */
 947    LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
 948
 949    sc->board_idx = cards_found++;
 950    sc->extra_stats.check = STATCHECK;
 951    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 952	    sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 953    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 954
 955    sc->lmc_ok = 0;
 956    sc->last_link_status = 0;
 957
 958    lmc_trace(dev, "lmc_init_one out");
 959    return 0;
 960}
 961
 962/*
 963 * Called from pci when removing module.
 964 */
 965static void lmc_remove_one(struct pci_dev *pdev)
 966{
 967	struct net_device *dev = pci_get_drvdata(pdev);
 968
 969	if (dev) {
 970		printk(KERN_DEBUG "%s: removing...\n", dev->name);
 971		unregister_hdlc_device(dev);
 972		free_netdev(dev);
 973	}
 974}
 975
 976/* After this is called, packets can be sent.
 977 * Does not initialize the addresses
 978 */
 979static int lmc_open(struct net_device *dev)
 980{
 981    lmc_softc_t *sc = dev_to_sc(dev);
 982    int err;
 983
 984    lmc_trace(dev, "lmc_open in");
 985
 986    lmc_led_on(sc, LMC_DS3_LED0);
 987
 988    lmc_dec_reset(sc);
 989    lmc_reset(sc);
 990
 991    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
 992    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
 993		  lmc_mii_readreg(sc, 0, 17));
 994
 995    if (sc->lmc_ok){
 996        lmc_trace(dev, "lmc_open lmc_ok out");
 997        return 0;
 998    }
 999
1000    lmc_softreset (sc);
1001
1002    /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
1003    if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
1004        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
1005        lmc_trace(dev, "lmc_open irq failed out");
1006        return -EAGAIN;
1007    }
1008    sc->got_irq = 1;
1009
1010    /* Assert Terminal Active */
1011    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
1012    sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
1013
1014    /*
1015     * reset to last state.
1016     */
1017    sc->lmc_media->set_status (sc, NULL);
1018
1019    /* setup default bits to be used in tulip_desc_t transmit descriptor
1020     * -baz */
1021    sc->TxDescriptControlInit = (
1022                                 LMC_TDES_INTERRUPT_ON_COMPLETION
1023                                 | LMC_TDES_FIRST_SEGMENT
1024                                 | LMC_TDES_LAST_SEGMENT
1025                                 | LMC_TDES_SECOND_ADDR_CHAINED
1026                                 | LMC_TDES_DISABLE_PADDING
1027                                );
1028
1029    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
1030        /* disable 32 bit CRC generated by ASIC */
1031        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
1032    }
1033    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
1034    /* Acknowledge the Terminal Active and light LEDs */
1035
1036    /* dev->flags |= IFF_UP; */
1037
1038    if ((err = lmc_proto_open(sc)) != 0)
1039	    return err;
1040
1041    netif_start_queue(dev);
1042    sc->extra_stats.tx_tbusy0++;
1043
1044    /*
1045     * select what interrupts we want to get
1046     */
1047    sc->lmc_intrmask = 0;
1048    /* Should be using the default interrupt mask defined in the .h file. */
1049    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
1050                         | TULIP_STS_RXINTR
1051                         | TULIP_STS_TXINTR
1052                         | TULIP_STS_ABNRMLINTR
1053                         | TULIP_STS_SYSERROR
1054                         | TULIP_STS_TXSTOPPED
1055                         | TULIP_STS_TXUNDERFLOW
1056                         | TULIP_STS_RXSTOPPED
1057		         | TULIP_STS_RXNOBUF
1058                        );
1059    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1060
1061    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
1062    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
1063    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1064
1065    sc->lmc_ok = 1; /* Run watchdog */
1066
1067    /*
1068     * Set the if up now - pfb
1069     */
1070
1071    sc->last_link_status = 1;
1072
1073    /*
1074     * Set up the watchdog timer and start it running.
1075     * (lmc_ok was set above, so the watchdog is active from the first tick.)
1076     */
1077    timer_setup(&sc->timer, lmc_watchdog, 0);
1078    sc->timer.expires = jiffies + HZ;
1079    add_timer (&sc->timer);
1080
1081    lmc_trace(dev, "lmc_open out");
1082
1083    return 0;
1084}
1085
1086/* Total reset to compensate for the AdTran DSU doing bad things
1087 *  under heavy load
1088 */
1089
1090static void lmc_running_reset (struct net_device *dev) /*fold00*/
1091{
1092    lmc_softc_t *sc = dev_to_sc(dev);
1093
1094    lmc_trace(dev, "lmc_running_reset in");
1095
1096    /* stop interrupts */
1097    /* Clear the interrupt mask */
1098    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1099
1100    lmc_dec_reset (sc);
1101    lmc_reset (sc);
1102    lmc_softreset (sc);
1103    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
1104    sc->lmc_media->set_link_status (sc, 1);
1105    sc->lmc_media->set_status (sc, NULL);
1106
1107    netif_wake_queue(dev);
1108
1109    sc->lmc_txfull = 0;
1110    sc->extra_stats.tx_tbusy0++;
1111
1112    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
1113    LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1114
1115    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
1116    LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1117
1118    lmc_trace(dev, "lmc_running_reset_out");
1119}
1120
1121
1122/* This is what is called when you ifconfig down a device.
1123 * This disables the timer for the watchdog and keepalives,
1124 * and disables the irq for dev.
1125 */
1126static int lmc_close(struct net_device *dev)
1127{
1128    /* not calling release_region() as we should */
1129    lmc_softc_t *sc = dev_to_sc(dev);
1130
1131    lmc_trace(dev, "lmc_close in");
1132
1133    sc->lmc_ok = 0;
1134    sc->lmc_media->set_link_status (sc, 0);
1135    del_timer (&sc->timer);
1136    lmc_proto_close(sc);
1137    lmc_ifdown (dev);
1138
1139    lmc_trace(dev, "lmc_close out");
1140
1141    return 0;
1142}
1143
1144/* Ends the transfer of packets */
1145/* When the interface goes down, this is called */
1146static int lmc_ifdown (struct net_device *dev) /*fold00*/
1147{
1148    lmc_softc_t *sc = dev_to_sc(dev);
1149    u32 csr6;
1150    int i;
1151
1152    lmc_trace(dev, "lmc_ifdown in");
1153
1154    /* Don't let anything else go on right now */
1155    //    dev->start = 0;
1156    netif_stop_queue(dev);
1157    sc->extra_stats.tx_tbusy1++;
1158
1159    /* stop interrupts */
1160    /* Clear the interrupt mask */
1161    LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1162
1163    /* Stop Tx and Rx on the chip */
1164    csr6 = LMC_CSR_READ (sc, csr_command);
1165    csr6 &= ~LMC_DEC_ST;		/* Turn off the Transmission bit */
1166    csr6 &= ~LMC_DEC_SR;		/* Turn off the Receive bit */
1167    LMC_CSR_WRITE (sc, csr_command, csr6);
1168
1169    sc->lmc_device->stats.rx_missed_errors +=
1170	    LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1171
1172    /* release the interrupt */
1173    if(sc->got_irq == 1){
1174        free_irq (dev->irq, dev);
1175        sc->got_irq = 0;
1176    }
1177
1178    /* free skbuffs in the Rx queue */
1179    for (i = 0; i < LMC_RXDESCS; i++)
1180    {
1181        struct sk_buff *skb = sc->lmc_rxq[i];
1182        sc->lmc_rxq[i] = NULL;
1183        sc->lmc_rxring[i].status = 0;
1184        sc->lmc_rxring[i].length = 0;
1185        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
1186        if (skb != NULL)
1187            dev_kfree_skb(skb);
1188        sc->lmc_rxq[i] = NULL;
1189    }
1190
1191    for (i = 0; i < LMC_TXDESCS; i++)
1192    {
1193        if (sc->lmc_txq[i] != NULL)
1194            dev_kfree_skb(sc->lmc_txq[i]);
1195        sc->lmc_txq[i] = NULL;
1196    }
1197
1198    lmc_led_off (sc, LMC_MII16_LED_ALL);
1199
1200    netif_wake_queue(dev);
1201    sc->extra_stats.tx_tbusy0++;
1202
1203    lmc_trace(dev, "lmc_ifdown out");
1204
1205    return 0;
1206}
1207
1208/* Interrupt handling routine.  This will take an incoming packet, or clean
1209 * up after a transmit.
1210 */
1211static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1212{
1213    struct net_device *dev = (struct net_device *) dev_instance;
1214    lmc_softc_t *sc = dev_to_sc(dev);
1215    u32 csr;
1216    int i;
1217    s32 stat;
1218    unsigned int badtx;
1219    u32 firstcsr;
1220    int max_work = LMC_RXDESCS;
1221    int handled = 0;
1222
1223    lmc_trace(dev, "lmc_interrupt in");
1224
1225    spin_lock(&sc->lmc_lock);
1226
1227    /*
1228     * Read the csr to find what interrupts we have (if any)
1229     */
1230    csr = LMC_CSR_READ (sc, csr_status);
1231
1232    /*
1233     * Make sure this is our interrupt
1234     */
1235    if ( ! (csr & sc->lmc_intrmask)) {
1236        goto lmc_int_fail_out;
1237    }
1238
1239    firstcsr = csr;
1240
1241    /* always go through this loop at least once */
1242    while (csr & sc->lmc_intrmask) {
1243	handled = 1;
1244
1245        /*
1246         * Clear interrupt bits; we handle all cases below
1247         */
1248        LMC_CSR_WRITE (sc, csr_status, csr);
1249
1250        /*
1251         * One of
1252         *  - Transmit process stopped   CSR5<1>
1253         *  - Transmit jabber timeout    CSR5<3>
1254         *  - Transmit underflow         CSR5<5>
1255         *  - Receive buffer unavailable CSR5<7>
1256         *  - Receive process stopped    CSR5<8>
1257         *  - Receive watchdog timeout   CSR5<9>
1258         *  - Early transmit interrupt   CSR5<10>
1259         *
1260         * Is this really right? Should we do a running reset for jabber?
1261         * (being a WAN card and all)
1262         */
1263        if (csr & TULIP_STS_ABNRMLINTR){
1264            lmc_running_reset (dev);
1265            break;
1266        }
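        /*
         * Illustrative sketch (not compiled, not part of the driver): one way
         * to decode the abnormal CSR5 bits listed above into readable strings
         * for debugging.  The bit positions are taken from the comment block
         * above; the table and helper block are hypothetical.
         */
#if 0
        {
            static const char *const abnrml[] = {
                [1]  = "transmit process stopped",
                [3]  = "transmit jabber timeout",
                [5]  = "transmit underflow",
                [7]  = "receive buffer unavailable",
                [8]  = "receive process stopped",
                [9]  = "receive watchdog timeout",
                [10] = "early transmit interrupt",
            };
            int bit;

            for (bit = 0; bit < ARRAY_SIZE(abnrml); bit++)
                if (abnrml[bit] && (csr & (1 << bit)))
                    printk(KERN_DEBUG "%s: %s\n", dev->name, abnrml[bit]);
        }
#endif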
1267        
1268        if (csr & TULIP_STS_RXINTR){
1269            lmc_trace(dev, "rx interrupt");
1270            lmc_rx (dev);
1271            
1272        }
1273        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
1274
1275	    int		n_compl = 0 ;
1276            /* reset the transmit timeout detection flag -baz */
1277	    sc->extra_stats.tx_NoCompleteCnt = 0;
1278
1279            badtx = sc->lmc_taint_tx;
1280            i = badtx % LMC_TXDESCS;
1281
1282            while ((badtx < sc->lmc_next_tx)) {
1283                stat = sc->lmc_txring[i].status;
1284
1285                LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
1286						 sc->lmc_txring[i].length);
1287                /*
1288                 * If bit 31 is set, the Tulip still owns this descriptor; break out of the loop
1289                 */
1290                if (stat & 0x80000000)
1291                    break;
1292
1293		n_compl++ ;		/* i.e., have an empty slot in ring */
1294                /*
1295                 * If we have no skbuff or have already cleared it,
1296                 * advance to the next descriptor; a bare continue here
1297                 * would never advance badtx and could loop forever.
1298                 */
1299                if (sc->lmc_txq[i] == NULL) { badtx++; i = badtx % LMC_TXDESCS; continue; }
1300
1301		/*
1302		 * Check the total error summary to look for any errors
1303		 */
1304		if (stat & 0x8000) {
1305			sc->lmc_device->stats.tx_errors++;
1306			if (stat & 0x4104)
1307				sc->lmc_device->stats.tx_aborted_errors++;
1308			if (stat & 0x0C00)
1309				sc->lmc_device->stats.tx_carrier_errors++;
1310			if (stat & 0x0200)
1311				sc->lmc_device->stats.tx_window_errors++;
1312			if (stat & 0x0002)
1313				sc->lmc_device->stats.tx_fifo_errors++;
1314		} else {
1315			sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1316
1317			sc->lmc_device->stats.tx_packets++;
1318                }
1319
1320		dev_consume_skb_irq(sc->lmc_txq[i]);
1321                sc->lmc_txq[i] = NULL;
1322
1323                badtx++;
1324                i = badtx % LMC_TXDESCS;
1325            }
1326
1327            if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
1328            {
1329                printk ("%s: out of sync pointer\n", dev->name);
1330                badtx += LMC_TXDESCS;
1331            }
1332            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1333            sc->lmc_txfull = 0;
1334            netif_wake_queue(dev);
1335	    sc->extra_stats.tx_tbusy0++;
1336
1337
1338#ifdef DEBUG
1339	    sc->extra_stats.dirtyTx = badtx;
1340	    sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1341	    sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1342#endif
1343            sc->lmc_taint_tx = badtx;
1344
1345            /*
1346             * Why was there a break here???
1347             */
1348        }			/* end handle transmit interrupt */
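        /*
         * Illustrative sketch (not compiled): the accounting above treats
         * lmc_next_tx and lmc_taint_tx as free-running counters, so the
         * number of descriptors still owned by the chip is simply their
         * difference; the "out of sync pointer" message fires when that
         * difference exceeds LMC_TXDESCS.  A hypothetical way to expose it:
         */
#if 0
        {
            unsigned int in_flight = sc->lmc_next_tx - sc->lmc_taint_tx;

            printk(KERN_DEBUG "%s: %u tx descriptors in flight, %u free\n",
                   dev->name, in_flight, LMC_TXDESCS - in_flight);
        }
#endif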
1349
1350        if (csr & TULIP_STS_SYSERROR) {
1351            u32 error;
1352            printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
1353            error = csr>>23 & 0x7;
1354            switch(error){
1355            case 0x000:
1356                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
1357                break;
1358            case 0x001:
1359                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1360                break;
1361            case 0x002:
1362                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1363                break;
1364            default:
1365                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
1366            }
1367            lmc_dec_reset (sc);
1368            lmc_reset (sc);
1369            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1370            LMC_EVENT_LOG(LMC_EVENT_RESET2,
1371                          lmc_mii_readreg (sc, 0, 16),
1372                          lmc_mii_readreg (sc, 0, 17));
1373
1374        }
1375
1376        
1377        if(max_work-- <= 0)
1378            break;
1379        
1380        /*
1381         * Get current csr status to make sure
1382         * we've cleared all interrupts
1383         */
1384        csr = LMC_CSR_READ (sc, csr_status);
1385    }				/* end interrupt loop */
1386    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
1387
1388lmc_int_fail_out:
1389
1390    spin_unlock(&sc->lmc_lock);
1391
1392    lmc_trace(dev, "lmc_interrupt out");
1393    return IRQ_RETVAL(handled);
1394}
1395
1396static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
1397					struct net_device *dev)
1398{
1399    lmc_softc_t *sc = dev_to_sc(dev);
1400    u32 flag;
1401    int entry;
1402    unsigned long flags;
1403
1404    lmc_trace(dev, "lmc_start_xmit in");
1405
1406    spin_lock_irqsave(&sc->lmc_lock, flags);
1407
1408    /* normal path, tbusy known to be zero */
1409
1410    entry = sc->lmc_next_tx % LMC_TXDESCS;
1411
1412    sc->lmc_txq[entry] = skb;
1413    sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
1414
1415    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
1416
1417#ifndef GCOM
1418    /* If the queue is less than half full, don't interrupt */
1419    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
1420    {
1421        /* Do not interrupt on completion of this packet */
1422        flag = 0x60000000;
1423        netif_wake_queue(dev);
1424    }
1425    else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
1426    {
1427        /* This generates an interrupt on completion of this packet */
1428        flag = 0xe0000000;
1429        netif_wake_queue(dev);
1430    }
1431    else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
1432    {
1433        /* Do not interrupt on completion of this packet */
1434        flag = 0x60000000;
1435        netif_wake_queue(dev);
1436    }
1437    else
1438    {
1439        /* This generates an interrupt on completion of this packet */
1440        flag = 0xe0000000;
1441        sc->lmc_txfull = 1;
1442        netif_stop_queue(dev);
1443    }
1444#else
1445    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
1446
1447    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1448    {				/* ring full, go busy */
1449        sc->lmc_txfull = 1;
1450	netif_stop_queue(dev);
1451	sc->extra_stats.tx_tbusy1++;
1452        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1453    }
1454#endif
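    /*
     * Note on the flag values above (an assumption, based on the Tulip TDES1
     * layout): 0x80000000 looks like interrupt-on-completion, 0x40000000
     * last-segment and 0x20000000 first-segment, so 0x60000000 means "whole
     * frame, no completion interrupt" and 0xe0000000 adds the interrupt.
     * A sketch with hypothetical names (not compiled):
     */
#if 0
#define TDES1_IC	0x80000000	/* interrupt on completion */
#define TDES1_LS	0x40000000	/* last segment of the frame */
#define TDES1_FS	0x20000000	/* first segment of the frame */
    flag = TDES1_LS | TDES1_FS;
    if (want_completion_irq)		/* hypothetical condition */
        flag |= TDES1_IC;
#endif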
1455
1456
1457    if (entry == LMC_TXDESCS - 1)	/* last descriptor in ring */
1458	flag |= LMC_TDES_END_OF_RING;	/* flag as such for Tulip */
1459
1460    /* don't pad small packets either */
1461    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
1462						sc->TxDescriptControlInit;
1463
1464    /* set the transmit timeout flag to be checked in
1465     * the watchdog timer handler. -baz
1466     */
1467
1468    sc->extra_stats.tx_NoCompleteCnt++;
1469    sc->lmc_next_tx++;
1470
1471    /* give ownership to the chip */
1472    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
1473    sc->lmc_txring[entry].status = 0x80000000;
1474
1475    /* send now! */
1476    LMC_CSR_WRITE (sc, csr_txpoll, 0);
1477
1478    spin_unlock_irqrestore(&sc->lmc_lock, flags);
1479
1480    lmc_trace(dev, "lmc_start_xmit out");
1481    return NETDEV_TX_OK;
1482}
1483
1484
1485static int lmc_rx(struct net_device *dev)
1486{
1487    lmc_softc_t *sc = dev_to_sc(dev);
1488    int i;
1489    int rx_work_limit = LMC_RXDESCS;
1490    int rxIntLoopCnt;		/* debug -baz */
1491    int localLengthErrCnt = 0;
1492    long stat;
1493    struct sk_buff *skb, *nsb;
1494    u16 len;
1495
1496    lmc_trace(dev, "lmc_rx in");
1497
1498    lmc_led_on(sc, LMC_DS3_LED3);
1499
1500    rxIntLoopCnt = 0;		/* debug -baz */
1501
1502    i = sc->lmc_next_rx % LMC_RXDESCS;
1503
1504    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
1505    {
1506        rxIntLoopCnt++;		/* debug -baz */
1507        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
1508        if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
1509		if ((stat & 0x0000ffff) != 0x7fff) {
1510			/* Oversized frame */
1511			sc->lmc_device->stats.rx_length_errors++;
1512			goto skip_packet;
1513		}
1514	}
1515
1516	if (stat & 0x00000008) { /* Catch a dribbling bit error */
1517		sc->lmc_device->stats.rx_errors++;
1518		sc->lmc_device->stats.rx_frame_errors++;
1519		goto skip_packet;
1520	}
1521
1522
1523	if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
1524		sc->lmc_device->stats.rx_errors++;
1525		sc->lmc_device->stats.rx_crc_errors++;
1526		goto skip_packet;
1527	}
1528
1529	if (len > LMC_PKT_BUF_SZ) {
1530		sc->lmc_device->stats.rx_length_errors++;
1531		localLengthErrCnt++;
1532		goto skip_packet;
1533	}
1534
1535	if (len < sc->lmc_crcSize + 2) {
1536		sc->lmc_device->stats.rx_length_errors++;
1537		sc->extra_stats.rx_SmallPktCnt++;
1538		localLengthErrCnt++;
1539		goto skip_packet;
1540	}
1541
1542        if(stat & 0x00004000){
1543            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
1544        }
1545
1546        len -= sc->lmc_crcSize;
1547
1548        skb = sc->lmc_rxq[i];
1549
1550        /*
1551         * We ran out of memory at some point;
1552         * just allocate an skb and continue.
1553         */
1554        
1555        if (!skb) {
1556            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1557            if (nsb) {
1558                sc->lmc_rxq[i] = nsb;
1559                nsb->dev = dev;
1560                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1561            }
1562            sc->failed_recv_alloc = 1;
1563            goto skip_packet;
1564        }
1565        
1566	sc->lmc_device->stats.rx_packets++;
1567	sc->lmc_device->stats.rx_bytes += len;
1568
1569        LMC_CONSOLE_LOG("recv", skb->data, len);
1570
1571        /*
1572         * I'm not sure of the sanity of this:
1573         * packets could be arriving at a constant
1574         * 44.210 Mbit/s and we're going to copy
1575         * them into a new buffer??
1576         */
1577        
1578        if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
1579            /*
1580             * If it's a large packet, don't copy it; just hand it up
1581             */
1582        give_it_anyways:
1583
1584            sc->lmc_rxq[i] = NULL;
1585            sc->lmc_rxring[i].buffer1 = 0x0;
1586
1587            skb_put (skb, len);
1588            skb->protocol = lmc_proto_type(sc, skb);
1589            skb_reset_mac_header(skb);
1590            /* skb_reset_network_header(skb); */
1591            skb->dev = dev;
1592            lmc_proto_netif(sc, skb);
1593
1594            /*
1595             * This skb will be destroyed by the upper layers, make a new one
1596             */
1597            nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1598            if (nsb) {
1599                sc->lmc_rxq[i] = nsb;
1600                nsb->dev = dev;
1601                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1602                /* Transferred to 21140 below */
1603            }
1604            else {
1605                /*
1606                 * We've run out of memory; stop trying to allocate
1607                 * memory and exit the interrupt handler.
1608                 *
1609                 * The chip may run out of receive descriptors and stop,
1610                 * in which case we'll try to allocate the buffer
1611                 * again (once a second).
1612                 */
1613		sc->extra_stats.rx_BuffAllocErr++;
1614                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1615                sc->failed_recv_alloc = 1;
1616                goto skip_out_of_mem;
1617            }
1618        }
1619        else {
1620            nsb = dev_alloc_skb(len);
1621            if(!nsb) {
1622                goto give_it_anyways;
1623            }
1624            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
1625            
1626            nsb->protocol = lmc_proto_type(sc, nsb);
1627            skb_reset_mac_header(nsb);
1628            /* skb_reset_network_header(nsb); */
1629            nsb->dev = dev;
1630            lmc_proto_netif(sc, nsb);
1631        }
1632
1633    skip_packet:
1634        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1635        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
1636
1637        sc->lmc_next_rx++;
1638        i = sc->lmc_next_rx % LMC_RXDESCS;
1639        rx_work_limit--;
1640        if (rx_work_limit < 0)
1641            break;
1642    }
1643
1644    /* detect condition for LMC1000 where DSU cable attaches and fills
1645     * descriptors with bogus packets
1646     *
1647    if (localLengthErrCnt > LMC_RXDESCS - 3) {
1648	sc->extra_stats.rx_BadPktSurgeCnt++;
1649	LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
1650		      sc->extra_stats.rx_BadPktSurgeCnt);
1651    } */
1652
1653    /* save max count of receive descriptors serviced */
1654    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
1655	    sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1656
1657#ifdef DEBUG
1658    if (rxIntLoopCnt == 0)
1659    {
1660        for (i = 0; i < LMC_RXDESCS; i++)
1661        {
1662            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
1663                != DESC_OWNED_BY_DC21X4)
1664            {
1665                rxIntLoopCnt++;
1666            }
1667        }
1668        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
1669    }
1670#endif
1671
1672
1673    lmc_led_off(sc, LMC_DS3_LED3);
1674
1675skip_out_of_mem:
1676
1677    lmc_trace(dev, "lmc_rx out");
1678
1679    return 0;
1680}
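/*
 * Illustrative sketch (not compiled): the receive path above copies small
 * frames into a right-sized skb and hands large frames up in place, using
 * LMC_MTU - (LMC_MTU >> 2), i.e. three quarters of the MTU, as the
 * copy-break point.  The same test as a hypothetical predicate:
 */
#if 0
static inline int lmc_rx_should_copy(u16 len)
{
	/* copy (and keep recycling the ring buffer) only for frames
	 * no larger than 75% of the MTU */
	return len <= (LMC_MTU - (LMC_MTU >> 2));
}
#endif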
1681
1682static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1683{
1684    lmc_softc_t *sc = dev_to_sc(dev);
1685    unsigned long flags;
1686
1687    lmc_trace(dev, "lmc_get_stats in");
1688
1689    spin_lock_irqsave(&sc->lmc_lock, flags);
1690
1691    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1692
1693    spin_unlock_irqrestore(&sc->lmc_lock, flags);
1694
1695    lmc_trace(dev, "lmc_get_stats out");
1696
1697    return &sc->lmc_device->stats;
1698}
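/*
 * Note (assumption): the Tulip's missed-frames counter (CSR8) appears to be
 * clear-on-read, which would explain why both lmc_get_stats() and
 * lmc_ifdown() fold its low 16 bits into rx_missed_errors instead of reading
 * it once.  A hypothetical helper expressing that accumulation:
 */
#if 0
static void lmc_update_missed_frames(lmc_softc_t * const sc)
{
	sc->lmc_device->stats.rx_missed_errors +=
		LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
}
#endif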
1699
1700static struct pci_driver lmc_driver = {
1701	.name		= "lmc",
1702	.id_table	= lmc_pci_tbl,
1703	.probe		= lmc_init_one,
1704	.remove		= lmc_remove_one,
1705};
1706
1707module_pci_driver(lmc_driver);
1708
1709unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1710{
1711    int i;
1712    int command = (0xf6 << 10) | (devaddr << 5) | regno;
1713    int retval = 0;
1714
1715    lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
1716
1717    LMC_MII_SYNC (sc);
1718
1719    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
1720
1721    for (i = 15; i >= 0; i--)
1722    {
1723        int dataval = (command & (1 << i)) ? 0x20000 : 0;
1724
1725        LMC_CSR_WRITE (sc, csr_9, dataval);
1726        lmc_delay ();
1727        /* __SLOW_DOWN_IO; */
1728        LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
1729        lmc_delay ();
1730        /* __SLOW_DOWN_IO; */
1731    }
1732
1733    lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
1734
1735    for (i = 19; i > 0; i--)
1736    {
1737        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1738        lmc_delay ();
1739        /* __SLOW_DOWN_IO; */
1740        retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
1741        LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
1742        lmc_delay ();
1743        /* __SLOW_DOWN_IO; */
1744    }
1745
1746    lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
1747
1748    return (retval >> 1) & 0xffff;
1749}
1750
1751void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
1752{
1753    int i;
1754    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
1755
1756    lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
1757
1758    LMC_MII_SYNC (sc);
1759
1760    i = 31;
1761    while (i >= 0)
1762    {
1763        int datav;
1764
1765        if (command & (1 << i))
1766            datav = 0x20000;
1767        else
1768            datav = 0x00000;
1769
1770        LMC_CSR_WRITE (sc, csr_9, datav);
1771        lmc_delay ();
1772        /* __SLOW_DOWN_IO; */
1773        LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
1774        lmc_delay ();
1775        /* __SLOW_DOWN_IO; */
1776        i--;
1777    }
1778
1779    i = 2;
1780    while (i > 0)
1781    {
1782        LMC_CSR_WRITE (sc, csr_9, 0x40000);
1783        lmc_delay ();
1784        /* __SLOW_DOWN_IO; */
1785        LMC_CSR_WRITE (sc, csr_9, 0x50000);
1786        lmc_delay ();
1787        /* __SLOW_DOWN_IO; */
1788        i--;
1789    }
1790
1791    lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
1792}
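/*
 * Illustrative sketch (not compiled): lmc_mii_readreg()/lmc_mii_writereg()
 * bit-bang the MII management interface through CSR9, so board control bits
 * are normally changed with a read-modify-write of MII register 16, much as
 * the LED helpers below do from the cached lmc_miireg16.  A hypothetical
 * direct variant:
 */
#if 0
static void lmc_mii16_set_bits(lmc_softc_t * const sc, unsigned bits)
{
	unsigned val = lmc_mii_readreg(sc, 0, 16);

	lmc_mii_writereg(sc, 0, 16, val | bits);
}
#endif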
1793
1794static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1795{
1796    int i;
1797
1798    lmc_trace(sc->lmc_device, "lmc_softreset in");
1799
1800    /* Initialize the receive rings and buffers. */
1801    sc->lmc_txfull = 0;
1802    sc->lmc_next_rx = 0;
1803    sc->lmc_next_tx = 0;
1804    sc->lmc_taint_rx = 0;
1805    sc->lmc_taint_tx = 0;
1806
1807    /*
1808     * Setup each one of the receiver buffers
1809     * allocate an skbuff for each one, setup the descriptor table
1810     * and point each buffer at the next one
1811     */
1812
1813    for (i = 0; i < LMC_RXDESCS; i++)
1814    {
1815        struct sk_buff *skb;
1816
1817        if (sc->lmc_rxq[i] == NULL)
1818        {
1819            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1820            if(skb == NULL){
1821                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
1822                sc->failed_ring = 1;
1823                break;
1824            }
1825            else{
1826                sc->lmc_rxq[i] = skb;
1827            }
1828        }
1829        else
1830        {
1831            skb = sc->lmc_rxq[i];
1832        }
1833
1834        skb->dev = sc->lmc_device;
1835
1836        /* owned by 21140 */
1837        sc->lmc_rxring[i].status = 0x80000000;
1838
1839        /* used to be PKT_BUF_SZ; now uses the skb's tailroom, since some space is lost to headroom */
1840        sc->lmc_rxring[i].length = skb_tailroom(skb);
1841
1842        /* used to be tail, which seems odd (why point at the end of the
1843         * packet?), but since the buffer is still empty, tail == data
1844         */
1845        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
1846
1847        /* This is fair since the structure is static and we have the next address */
1848        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
1849
1850    }
1851
1852    /*
1853     * Sets end of ring
1854     */
1855    if (i != 0) {
1856        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
1857        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
1858    }
1859    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
1860
1861    /* Initialize the transmit rings and buffers */
1862    for (i = 0; i < LMC_TXDESCS; i++)
1863    {
1864        if (sc->lmc_txq[i] != NULL){		/* have buffer */
1865            dev_kfree_skb(sc->lmc_txq[i]);	/* free it */
1866	    sc->lmc_device->stats.tx_dropped++;	/* We just dropped a packet */
1867        }
1868        sc->lmc_txq[i] = NULL;
1869        sc->lmc_txring[i].status = 0x00000000;
1870        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
1871    }
1872    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
1873    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
1874
1875    lmc_trace(sc->lmc_device, "lmc_softreset out");
1876}
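/*
 * Illustrative sketch (not compiled): lmc_softreset() chains each descriptor
 * to the next through buffer2 and points the last one back at the first, so
 * the chip sees a circular list.  The transmit-ring wrap written out as a
 * hypothetical helper:
 */
#if 0
static void lmc_chain_txring(lmc_softc_t * const sc)
{
	int i;

	for (i = 0; i < LMC_TXDESCS - 1; i++)
		sc->lmc_txring[i].buffer2 = virt_to_bus(&sc->lmc_txring[i + 1]);
	sc->lmc_txring[LMC_TXDESCS - 1].buffer2 = virt_to_bus(&sc->lmc_txring[0]);
}
#endif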
1877
1878void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1879{
1880    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1881    sc->lmc_gpio_io &= ~bits;
1882    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1883    lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1884}
1885
1886void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1887{
1888    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1889    sc->lmc_gpio_io |= bits;
1890    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1891    lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1892}
1893
1894void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
1895{
1896    lmc_trace(sc->lmc_device, "lmc_led_on in");
1897    if((~sc->lmc_miireg16) & led){ /* Already on! */
1898        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
1899        return;
1900    }
1901    
1902    sc->lmc_miireg16 &= ~led;
1903    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1904    lmc_trace(sc->lmc_device, "lmc_led_on out");
1905}
1906
1907void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
1908{
1909    lmc_trace(sc->lmc_device, "lmc_led_off in");
1910    if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
1911        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
1912        return;
1913    }
1914    
1915    sc->lmc_miireg16 |= led;
1916    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1917    lmc_trace(sc->lmc_device, "lmc_led_off out");
1918}
1919
1920static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
1921{
1922    lmc_trace(sc->lmc_device, "lmc_reset in");
1923    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
1924    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1925
1926    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
1927    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1928
1929    /*
1930     * make some of the GPIO pins be outputs
1931     */
1932    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
1933
1934    /*
1935     * RESET low to force state reset.  This also forces
1936     * the transmitter clock to be internal, but we expect to reset
1937     * that later anyway.
1938     */
1939    sc->lmc_gpio &= ~(LMC_GEP_RESET);
1940    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
1941
1942    /*
1943     * hold for more than 10 microseconds
1944     */
1945    udelay(50);
1946
1947    /*
1948     * stop driving Xilinx-related signals
1949     */
1950    lmc_gpio_mkinput(sc, LMC_GEP_RESET);
1951
1952    /*
1953     * Call media specific init routine
1954     */
1955    sc->lmc_media->init(sc);
1956
1957    sc->extra_stats.resetCount++;
1958    lmc_trace(sc->lmc_device, "lmc_reset out");
1959}
1960
1961static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
1962{
1963    u32 val;
1964    lmc_trace(sc->lmc_device, "lmc_dec_reset in");
1965
1966    /*
1967     * disable all interrupts
1968     */
1969    sc->lmc_intrmask = 0;
1970    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
1971
1972    /*
1973     * Reset the chip with a software reset command.
1974     * Wait 10 microseconds (the reset needs 50 PCI cycles, which at
1975     * 33 MHz is only about 1.5 microseconds, but wait a bit
1976     * longer anyway)
1977     */
1978    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
1979    udelay(25);
1980#ifdef __sparc__
1981    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
1982    sc->lmc_busmode = 0x00100000;
1983    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
1984    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
1985#endif
1986    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
1987
1988    /*
1989     * We want:
1990     *   no ethernet address in frames we write
1991     *   disable padding (txdesc, padding disable)
1992     *   ignore runt frames (rdes0 bit 15)
1993     *   no receiver watchdog or transmitter jabber timer
1994     *       (csr15 bit 0,14 == 1)
1995     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
1996     */
1997
1998    sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
1999                         | TULIP_CMD_FULLDUPLEX
2000                         | TULIP_CMD_PASSBADPKT
2001                         | TULIP_CMD_NOHEARTBEAT
2002                         | TULIP_CMD_PORTSELECT
2003                         | TULIP_CMD_RECEIVEALL
2004                         | TULIP_CMD_MUSTBEONE
2005                       );
2006    sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
2007                          | TULIP_CMD_THRESHOLDCTL
2008                          | TULIP_CMD_STOREFWD
2009                          | TULIP_CMD_TXTHRSHLDCTL
2010                        );
2011
2012    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
2013
2014    /*
2015     * disable receiver watchdog and transmit jabber
2016     */
2017    val = LMC_CSR_READ(sc, csr_sia_general);
2018    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
2019    LMC_CSR_WRITE(sc, csr_sia_general, val);
2020
2021    lmc_trace(sc->lmc_device, "lmc_dec_reset out");
2022}
2023
2024static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
2025                         size_t csr_size)
2026{
2027    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
2028    sc->lmc_csrs.csr_busmode	        = csr_base +  0 * csr_size;
2029    sc->lmc_csrs.csr_txpoll		= csr_base +  1 * csr_size;
2030    sc->lmc_csrs.csr_rxpoll		= csr_base +  2 * csr_size;
2031    sc->lmc_csrs.csr_rxlist		= csr_base +  3 * csr_size;
2032    sc->lmc_csrs.csr_txlist		= csr_base +  4 * csr_size;
2033    sc->lmc_csrs.csr_status		= csr_base +  5 * csr_size;
2034    sc->lmc_csrs.csr_command	        = csr_base +  6 * csr_size;
2035    sc->lmc_csrs.csr_intr		= csr_base +  7 * csr_size;
2036    sc->lmc_csrs.csr_missed_frames	= csr_base +  8 * csr_size;
2037    sc->lmc_csrs.csr_9		        = csr_base +  9 * csr_size;
2038    sc->lmc_csrs.csr_10		        = csr_base + 10 * csr_size;
2039    sc->lmc_csrs.csr_11		        = csr_base + 11 * csr_size;
2040    sc->lmc_csrs.csr_12		        = csr_base + 12 * csr_size;
2041    sc->lmc_csrs.csr_13		        = csr_base + 13 * csr_size;
2042    sc->lmc_csrs.csr_14		        = csr_base + 14 * csr_size;
2043    sc->lmc_csrs.csr_15		        = csr_base + 15 * csr_size;
2044    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
2045}
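/*
 * Note (assumption): csr_base and csr_size come from the PCI probe path; the
 * 21140's CSRs sit a fixed stride apart, so the whole map is just
 * base + n * stride.  A hedged usage sketch (the stride value is
 * illustrative, not taken from this file):
 */
#if 0
	lmc_initcsrs(sc, dev->base_addr, 8);	/* 8-byte CSR spacing */
#endif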
2046
2047static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
2048{
2049    lmc_softc_t *sc = dev_to_sc(dev);
2050    u32 csr6;
2051    unsigned long flags;
2052
2053    lmc_trace(dev, "lmc_driver_timeout in");
2054
2055    spin_lock_irqsave(&sc->lmc_lock, flags);
2056
2057    printk("%s: Xmitter busy\n", dev->name);
2058
2059    sc->extra_stats.tx_tbusy_calls++;
2060    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
2061	    goto bug_out;
2062
2063    /*
2064     * Chip seems to have locked up
2065     * Reset it
2066     * This wipes out all our descriptor
2067     * tables and starts from scratch
2068     */
2069
2070    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
2071                  LMC_CSR_READ (sc, csr_status),
2072		  sc->extra_stats.tx_ProcTimeout);
2073
2074    lmc_running_reset (dev);
2075
2076    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
2077    LMC_EVENT_LOG(LMC_EVENT_RESET2,
2078                  lmc_mii_readreg (sc, 0, 16),
2079                  lmc_mii_readreg (sc, 0, 17));
2080
2081    /* restart the tx processes */
2082    csr6 = LMC_CSR_READ (sc, csr_command);
2083    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
2084    LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
2085
2086    /* immediate transmit */
2087    LMC_CSR_WRITE (sc, csr_txpoll, 0);
2088
2089    sc->lmc_device->stats.tx_errors++;
2090    sc->extra_stats.tx_ProcTimeout++; /* -baz */
2091
2092    netif_trans_update(dev); /* prevent tx timeout */
2093
2094bug_out:
2095
2096    spin_unlock_irqrestore(&sc->lmc_lock, flags);
2097
2098    lmc_trace(dev, "lmc_driver_timeout out");
2099
2100
2101}