/*
 * This code is derived from the VIA reference driver (copyright message
 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
 * addition to the Linux kernel.
 *
 * The code has been merged into one source file, cleaned up to follow
 * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
 * for 64bit hardware platforms.
 *
 * TODO
 *	rx_copybreak/alignment
 *	More testing
 *
 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
 * Additional fixes and clean up: Francois Romieu
 *
 * This source has not been verified for use in safety critical systems.
 *
 * Please direct queries about the revamped driver to the linux-kernel
 * list, not VIA.
 *
 * Original code:
 *
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This software may be redistributed and/or modified under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Author: Chuang Liang-Shing, AJ Jiang
 *
 * Date: Jan 24, 2003
 *
 * MODULE_LICENSE("GPL");
 *
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <linux/reboot.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>

#include "via-velocity.h"


static int velocity_nics;
static int msglevel = MSG_LEVEL_INFO;

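/*
 * CAM (content addressable memory) access helpers. The helpers below
 * appear to share one access protocol: the CAMCR page-select bits
 * (CAMCR_PS0/PS1) choose whether the MARCAM register window exposes the
 * CAM mask or the CAM data, CAMADDR selects the entry (CAMADDR_CAMEN
 * enabling access for updates, CAMADDR_VCAMSL selecting the VLAN CAM),
 * and the window is switched back to the multicast address registers
 * (MAR) before returning.
 */
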
/**
 *	mac_get_cam_mask	-	Read a CAM mask
 *	@regs: register block for this velocity
 *	@mask: buffer to store mask
 *
 *	Fetch the mask bits of the selected CAM and store them into the
 *	provided mask buffer.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(0, &regs->CAMADDR);

	/* read mask */
	for (i = 0; i < 8; i++)
		*mask++ = readb(&(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *	mac_set_cam_mask	-	Set a CAM mask
 *	@regs: register block for this velocity
 *	@mask: CAM mask to load
 *
 *	Store a new mask into a CAM
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;
	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(CAMADDR_CAMEN, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;
	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

/**
 *	mac_set_cam	-	set CAM data
 *	@regs: register block of this velocity
 *	@idx: CAM index
 *	@addr: 2 or 6 bytes of CAM data
 *
 *	Load an address or vlan tag into a CAM
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
	int i;

	/* Select CAM data */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

	for (i = 0; i < 6; i++)
		writeb(*addr++, &(regs->MARCAM[i]));

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}

static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
			     const u8 *addr)
{

	/* Select CAM data */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
	writew(*((u16 *) addr), &regs->MARCAM[0]);

	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}


/**
 *	mac_wol_reset	-	reset WOL after exiting low power
 *	@regs: register block of this velocity
 *
 *	Called after we drop out of Wake-on-LAN mode in order to
 *	reset the Wake-on-LAN features. This function doesn't restore
 *	the rest of the logic after a sleep/wakeup cycle.
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

	/* Turn off SWPTAG right after leaving power mode */
	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
	/* clear sticky bits */
	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
	/* disable force PME-enable */
	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
	/* disable power-event config bit */
	writew(0xFFFF, &regs->WOLCRClr);
	/* clear power status */
	writew(0xFFFF, &regs->WOLSRClr);
}

static const struct ethtool_ops velocity_ethtool_ops;

/*
    Define module options
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

#define VELOCITY_PARAM(N, D) \
	static int N[MAX_UNITS] = OPTION_DEFAULT;\
	module_param_array(N, int, NULL, 0); \
	MODULE_PARM_DESC(N, D);
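
/*
 * As an illustration, VELOCITY_PARAM(RxDescriptors, "Number of receive
 * descriptors") expands to:
 *
 *	static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(RxDescriptors, int, NULL, 0);
 *	MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * so every option declared below can be given per adapter on the module
 * command line, e.g. "RxDescriptors=64,128" for two boards.
 */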

#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: the RX FIFO threshold is 128 bytes.
   1: the RX FIFO threshold is 512 bytes.
   2: the RX FIFO threshold is 1024 bytes.
   3: store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush until empty)
   7: SF (flush until empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: the IP header is not DWORD aligned. (Default)
   1: the IP header is DWORD aligned.
      In some environments the IP header must be DWORD aligned,
      or the packet will be dropped when we receive it (e.g. IPVS).
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: autonegotiate both speed and duplex mode
   1: 100Mbps half duplex mode
   2: 100Mbps full duplex mode
   3: 10Mbps half duplex mode
   4: 10Mbps full duplex mode
   5: 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF     0
/* ValPktLen[] is used for setting the checksum offload ability of NIC.
   0: Receive frames with invalid layer 2 length (Default)
   1: Drop frames with invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if a magic packet is received. (Default)
   1: Wake up if the link status changes.
   2: Wake up if an arp packet is received.
   4: Wake up if any unicast packet is received.
   These values can be summed to enable more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

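/*
 * Received frames shorter than rx_copybreak bytes are copied into a
 * freshly allocated skb and the large ring buffer is recycled; bigger
 * frames are passed up in place. This is the usual *_copybreak
 * convention and is assumed here, since the receive path itself lives
 * later in this file.
 */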
static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");

/*
 *	Internal board variants. At the moment we have only one
 */
static struct velocity_info_tbl chip_info_table[] = {
	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
	{ }
};

/*
 *	Describe the PCI device identifiers that we support in this
 *	device driver. Used for hotplug autoloading.
 */
static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
	{ }
};

MODULE_DEVICE_TABLE(pci, velocity_id_table);

/**
 *	get_chip_name	-	identifier to name
 *	@chip_id: chip identifier
 *
 *	Given a chip identifier return a suitable description. Returns
 *	a pointer to a static string valid while the driver is loaded.
 */
static const char __devinit *get_chip_name(enum chip_type chip_id)
{
	int i;
	for (i = 0; chip_info_table[i].name != NULL; i++)
		if (chip_info_table[i].chip_id == chip_id)
			break;
	return chip_info_table[i].name;
}

/**
 *	velocity_remove1	-	device unplug
 *	@pdev: PCI device being removed
 *
 *	Device unload callback. Called on an unplug or on module
 *	unload for each active device that is present. Disconnects
 *	the device from the network layer and frees all the resources.
 */
static void __devexit velocity_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct velocity_info *vptr = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(vptr->mac_regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	velocity_nics--;
}

/**
 *	velocity_set_int_opt	-	parser for integer options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@min: lowest value allowed
 *	@max: highest value allowed
 *	@def: default value
 *	@name: property name
 *	@devname: device name
 *
 *	Set an integer property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */
static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
{
	if (val == -1)
		*opt = def;
	else if (val < min || val > max) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
					devname, name, min, max);
		*opt = def;
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
					devname, name, val);
		*opt = val;
	}
}

/**
 *	velocity_set_bool_opt	-	parser for boolean options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@def: default value (yes/no)
 *	@flag: numeric value to set for true.
 *	@name: property name
 *	@devname: device name
 *
 *	Set a boolean property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */
static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
{
	(*opt) &= (~flag);
	if (val == -1)
		*opt |= (def ? flag : 0);
	else if (val < 0 || val > 1) {
		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
			devname, name);
		*opt |= (def ? flag : 0);
	} else {
		printk(KERN_INFO "%s: set parameter %s to %s\n",
			devname, name, val ? "TRUE" : "FALSE");
		*opt |= (val ? flag : 0);
	}
}

/**
 *	velocity_get_options	-	set options on device
 *	@opts: option structure for the device
 *	@index: index of option to use in module options array
 *	@devname: device name
 *
 *	Turn the module and command options into a single structure
 *	for the current device.
 */
static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
{

	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);

	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
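	/*
	 * The chip is handed receive descriptors only in blocks of four
	 * (see velocity_give_many_rx_descs), so round the RX ring size
	 * down to a multiple of 4.
	 */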
	opts->numrx = (opts->numrx & ~3);
}

/**
 *	velocity_init_cam_filter	-	initialise CAM
 *	@vptr: velocity to program
 *
 *	Initialize the content addressable memory used for filters. Load
 *	appropriately according to the presence of VLANs.
 */
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	unsigned int vid, i = 0;

	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

	/* Disable all CAMs */
	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
	mac_set_cam_mask(regs, vptr->mCAMmask);

	/* Enable VCAMs */
	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
		mac_set_vlan_cam(regs, i, (u8 *) &vid);
		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
		if (++i >= VCAM_SIZE)
			break;
	}
	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}

static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	set_bit(vid, vptr->active_vlans);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
}

static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct velocity_info *vptr = netdev_priv(dev);

	spin_lock_irq(&vptr->lock);
	clear_bit(vid, vptr->active_vlans);
	velocity_init_cam_filter(vptr);
	spin_unlock_irq(&vptr->lock);
}

static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
{
	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
}

/**
 *	velocity_rx_reset	-	handle a receive reset
 *	@vptr: velocity we are resetting
 *
 *	Reset the ownership and status for the receive ring side.
 *	Hand all the receive queue to the NIC.
 */
static void velocity_rx_reset(struct velocity_info *vptr)
{

	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i;

	velocity_init_rx_ring_indexes(vptr);

	/*
	 *	Init state, all RD entries belong to the NIC
	 */
	for (i = 0; i < vptr->options.numrx; ++i)
		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

	writew(vptr->options.numrx, &regs->RBRDU);
	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
	writew(0, &regs->RDIdx);
	writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 *	velocity_get_opt_media_mode	-	get media selection
 *	@vptr: velocity adapter
 *
 *	Get the media mode stored in EEPROM or module options and load
 *	mii_status accordingly. The requested link state information
 *	is also returned.
 */
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
	u32 status = 0;

	switch (vptr->options.spd_dpx) {
	case SPD_DPX_AUTO:
		status = VELOCITY_AUTONEG_ENABLE;
		break;
	case SPD_DPX_100_FULL:
		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_10_FULL:
		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_100_HALF:
		status = VELOCITY_SPEED_100;
		break;
	case SPD_DPX_10_HALF:
		status = VELOCITY_SPEED_10;
		break;
	case SPD_DPX_1000_FULL:
		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
		break;
	}
	vptr->mii_status = status;
	return status;
}

/**
 *	safe_disable_mii_autopoll	-	autopoll off
 *	@regs: velocity registers
 *
 *	Turn off the autopoll and wait for it to disable on the chip.
 */
static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
{
	u16 ww;

	/* turn off MAUTO */
	writeb(0, &regs->MIICR);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}

/**
 *	enable_mii_autopoll	-	turn on autopolling
 *	@regs: velocity registers
 *
 *	Enable the MII link status autopoll feature on the Velocity
 *	hardware. Wait for it to enable.
 */
static void enable_mii_autopoll(struct mac_regs __iomem *regs)
{
	int ii;

	writeb(0, &(regs->MIICR));
	writeb(MIIADR_SWMPL, &regs->MIIADR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

	writeb(MIICR_MAUTO, &regs->MIICR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

}

/**
 *	velocity_mii_read	-	read MII data
 *	@regs: velocity registers
 *	@index: MII register index
 *	@data: buffer for received data
 *
 *	Perform a single read of an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	writeb(index, &regs->MIIADR);

	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(readb(&regs->MIICR) & MIICR_RCMD))
			break;
	}

	*data = readw(&regs->MIIDATA);

	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}


/**
 *	mii_check_media_mode	-	check media state
 *	@regs: velocity registers
 *
 *	Check the current MII status and determine the link status
 *	accordingly.
 */
static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u16 ANAR;

	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
		status |= VELOCITY_LINK_FAIL;

	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
		status |= (VELOCITY_SPEED_1000);
	else {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if (ANAR & ADVERTISE_100FULL)
			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
		else if (ANAR & ADVERTISE_100HALF)
			status |= VELOCITY_SPEED_100;
		else if (ANAR & ADVERTISE_10FULL)
			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
		else
			status |= (VELOCITY_SPEED_10);
	}

	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

/**
 *	velocity_mii_write	-	write MII data
 *	@regs: velocity registers
 *	@mii_addr: MII register index
 *	@data: 16bit data for the MII register
 *
 *	Perform a single write to an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */
static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	/* MII reg offset */
	writeb(mii_addr, &regs->MIIADR);
	/* set MII data */
	writew(data, &regs->MIIDATA);

	/* turn on MIICR_WCMD */
	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(5);
		if (!(readb(&regs->MIICR) & MIICR_WCMD))
			break;
	}
	enable_mii_autopoll(regs);

	if (ww == W_MAX_TIMEOUT)
		return -ETIMEDOUT;
	return 0;
}

/**
 *	set_mii_flow_control	-	flow control setup
 *	@vptr: velocity interface
 *
 *	Set up the flow control on this interface according to
 *	the supplied user/eeprom options.
 */
static void set_mii_flow_control(struct velocity_info *vptr)
{
	/* Enable or disable PAUSE in ANAR */
	switch (vptr->options.flow_cntl) {
	case FLOW_CNTL_TX:
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_RX:
		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_TX_RX:
		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;

	case FLOW_CNTL_DISABLE:
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
		break;
	default:
		break;
	}
}

/**
 *	mii_set_auto_on		-	autonegotiate on
 *	@vptr: velocity
 *
 *	Enable autonegotiation on this interface.
 */
static void mii_set_auto_on(struct velocity_info *vptr)
{
	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
	else
		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}

static u32 check_connection_type(struct mac_regs __iomem *regs)
{
	u32 status = 0;
	u8 PHYSR0;
	u16 ANAR;
	PHYSR0 = readb(&regs->PHYSR0);

	/*
	   if (!(PHYSR0 & PHYSR0_LINKGD))
	   status|=VELOCITY_LINK_FAIL;
	 */

	if (PHYSR0 & PHYSR0_FDPX)
		status |= VELOCITY_DUPLEX_FULL;

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	else if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;

	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}


/**
 *	velocity_set_media_mode		-	set media mode
 *	@vptr: velocity adapter
 *	@mii_status: old MII link state
 *
 *	Check the media link state and configure the flow control
 *	PHY and also velocity hardware setup accordingly. In particular
 *	we need to set up CD polling and frame bursting.
 */
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
	u32 curr_status;
	struct mac_regs __iomem *regs = vptr->mac_regs;

	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

	/* Set mii link status */
	set_mii_flow_control(vptr);

	/*
	   Check if new status is consistent with current status
	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
	       (mii_status==curr_status)) {
	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
	   vptr->mii_status=check_connection_type(vptr->mac_regs);
	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
	   return 0;
	   }
	 */

	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);

	/*
	 *	If connection type is AUTO
	 */
	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is in AUTO mode\n");
		/* clear force MAC mode bit */
		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
		/* set duplex mode of MAC according to duplex mode of MII */
		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	} else {
		u16 CTRL1000;
		u16 ANAR;
		u8 CHIPGCR;

		/*
		 * 1. if it's 3119, disable frame bursting in halfduplex mode
		 *    and enable it in fullduplex mode
		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
		 * 3. only enable CD heart beat counter in 10HD mode
		 */

		/* set force MAC mode bit */
		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

		CHIPGCR = readb(&regs->CHIPGCR);

		if (mii_status & VELOCITY_SPEED_1000)
			CHIPGCR |= CHIPGCR_FCGMII;
		else
			CHIPGCR &= ~CHIPGCR_FCGMII;

		if (mii_status & VELOCITY_DUPLEX_FULL) {
			CHIPGCR |= CHIPGCR_FCFDX;
			writeb(CHIPGCR, &regs->CHIPGCR);
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
		} else {
			CHIPGCR &= ~CHIPGCR_FCFDX;
			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
			writeb(CHIPGCR, &regs->CHIPGCR);
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
		}

		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
		if ((mii_status & VELOCITY_SPEED_1000) &&
		    (mii_status & VELOCITY_DUPLEX_FULL)) {
			CTRL1000 |= ADVERTISE_1000FULL;
		}
		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);

		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
		else
			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);

		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
		if (mii_status & VELOCITY_SPEED_100) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ADVERTISE_100FULL;
			else
				ANAR |= ADVERTISE_100HALF;
		} else if (mii_status & VELOCITY_SPEED_10) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ADVERTISE_10FULL;
			else
				ANAR |= ADVERTISE_10HALF;
		}
		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
	}
	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
	return VELOCITY_LINK_CHANGE;
}

/**
 *	velocity_print_link_status	-	link status reporting
 *	@vptr: velocity to report on
 *
 *	Turn the link status of the velocity card into a kernel log
 *	description of the new link state, detailing speed and duplex
 *	status.
 */
static void velocity_print_link_status(struct velocity_info *vptr)
{

	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);

		if (vptr->mii_status & VELOCITY_SPEED_1000)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
		else if (vptr->mii_status & VELOCITY_SPEED_100)
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");

		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
		else
			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
	} else {
		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
		switch (vptr->options.spd_dpx) {
		case SPD_DPX_1000_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
			break;
		case SPD_DPX_100_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
			break;
		case SPD_DPX_100_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
			break;
		case SPD_DPX_10_HALF:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
			break;
		case SPD_DPX_10_FULL:
			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
			break;
		default:
			break;
		}
	}
}

/**
 *	enable_flow_control_ability	-	flow control
 *	@vptr: velocity to configure
 *
 *	Set up flow control according to the flow control options
 *	determined by the eeprom/configuration.
 */
static void enable_flow_control_ability(struct velocity_info *vptr)
{

	struct mac_regs __iomem *regs = vptr->mac_regs;

	switch (vptr->options.flow_cntl) {

	case FLOW_CNTL_DEFAULT:
		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
			writel(CR0_FDXRFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXRFCEN, &regs->CR0Clr);

		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
			writel(CR0_FDXTFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_RX:
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX_RX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		break;

	case FLOW_CNTL_DISABLE:
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	default:
		break;
	}

}

/**
 *	velocity_soft_reset	-	soft reset
 *	@vptr: velocity to reset
 *
 *	Kick off a soft reset of the velocity adapter and then poll
 *	until the reset sequence has completed before returning.
 */
static int velocity_soft_reset(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i = 0;

	writel(CR0_SFRST, &regs->CR0Set);

	for (i = 0; i < W_MAX_TIMEOUT; i++) {
		udelay(5);
		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
			break;
	}

	if (i == W_MAX_TIMEOUT) {
		writel(CR0_FORSRST, &regs->CR0Set);
		/* FIXME: PCI POSTING */
		/* delay 2ms */
		mdelay(2);
	}
	return 0;
}

/**
 *	velocity_set_multi	-	filter list change callback
 *	@dev: network device
 *
 *	Called by the network layer when the filter lists need to change
 *	for a velocity adapter. Reload the CAMs with the new address
 *	filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u8 rx_mode;
	int i;
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB);
	} else {
		int offset = MCAM_SIZE - vptr->multicast_limit;
		mac_get_cam_mask(regs, vptr->mCAMmask);

		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			mac_set_cam(regs, i + offset, ha->addr);
			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
			i++;
		}

		mac_set_cam_mask(regs, vptr->mCAMmask);
		rx_mode = RCR_AM | RCR_AB | RCR_AP;
	}
	if (dev->mtu > 1500)
		rx_mode |= RCR_AL;

	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);

}

/*
 *	MII access, media link mode setting functions
 */

/**
 *	mii_init	-	set up MII
 *	@vptr: velocity adapter
 *	@mii_status: link status
 *
 *	Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
	u16 BMCR;

	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
	case PHYID_CICADA_CS8201:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode, to work around the
		 *	NWay-forced vs. legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		/*
		 *	Turn on Link/Activity LED enable bit for CIS8201
		 */
		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
		break;
	case PHYID_VT3216_32BIT:
	case PHYID_VT3216_64BIT:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode, to work around the
		 *	NWay-forced vs. legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
		break;

	case PHYID_MARVELL_1000:
	case PHYID_MARVELL_1000S:
		/*
		 *	Assert CRS on Transmit
		 */
		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
		break;
	default:
		;
	}
	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
	if (BMCR & BMCR_ISOLATE) {
		BMCR &= ~BMCR_ISOLATE;
		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
	}
}

/**
 * setup_queue_timers	-	Setup interrupt timers
 * @vptr: velocity adapter
 *
 * Setup interrupt frequency during suppression (timeout if the frame
 * count isn't filled).
 */
static void setup_queue_timers(struct velocity_info *vptr)
{
	/* Only for newer revisions */
	if (vptr->rev_id >= REV_ID_VT3216_A0) {
		u8 txqueue_timer = 0;
		u8 rxqueue_timer = 0;

		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
				VELOCITY_SPEED_100)) {
			txqueue_timer = vptr->options.txqueue_timer;
			rxqueue_timer = vptr->options.rxqueue_timer;
		}

		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
	}
}

/**
 * setup_adaptive_interrupts  -  Setup interrupt suppression
 * @vptr: velocity adapter
 *
 * The velocity is able to suppress interrupts during high interrupt load.
 * This function turns on that feature.
 */
static void setup_adaptive_interrupts(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u16 tx_intsup = vptr->options.tx_intsup;
	u16 rx_intsup = vptr->options.rx_intsup;

	/* Setup default interrupt mask (will be changed below) */
	vptr->int_mask = INT_MASK_DEF;

	/* Set Tx Interrupt Suppression Threshold */
	writeb(CAMCR_PS0, &regs->CAMCR);
	if (tx_intsup != 0) {
		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
				ISR_PTX2I | ISR_PTX3I);
		writew(tx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);

	/* Set Rx Interrupt Suppression Threshold */
	writeb(CAMCR_PS1, &regs->CAMCR);
	if (rx_intsup != 0) {
		vptr->int_mask &= ~ISR_PRXI;
		writew(rx_intsup, &regs->ISRCTL);
	} else
		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);

	/* Select page to interrupt hold timer */
	writeb(0, &regs->CAMCR);
}

/**
 *	velocity_init_registers	-	initialise MAC registers
 *	@vptr: velocity to init
 *	@type: type of initialisation (hot or cold)
 *
 *	Initialise the MAC on a reset or on first set up on the
 *	hardware.
 */
static void velocity_init_registers(struct velocity_info *vptr,
				    enum velocity_init_type type)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int i, mii_status;

	mac_wol_reset(regs);

	switch (type) {
	case VELOCITY_INIT_RESET:
	case VELOCITY_INIT_WOL:

		netif_stop_queue(vptr->dev);

		/*
		 *	Reset RX to prevent the RX pointer ending up off a
		 *	4 descriptor boundary
		 */
		velocity_rx_reset(vptr);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		mii_status = velocity_get_opt_media_mode(vptr);
		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);

		mac_clear_isr(regs);
		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
							&regs->CR0Set);

		break;

	case VELOCITY_INIT_COLD:
	default:
		/*
		 *	Do reset
		 */
		velocity_soft_reset(vptr);
		mdelay(5);

		mac_eeprom_reload(regs);
		for (i = 0; i < 6; i++)
			writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));

		/*
		 *	clear Pre_ACPI bit.
		 */
		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
		mac_set_dma_length(regs, vptr->options.DMA_length);

		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
		/*
		 *	Back off algorithm use original IEEE standard
		 */
		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);

		/*
		 *	Init CAM filter
		 */
		velocity_init_cam_filter(vptr);

		/*
		 *	Set packet filter: Receive directed and broadcast address
		 */
		velocity_set_multi(vptr->dev);

		/*
		 *	Enable MII auto-polling
		 */
		enable_mii_autopoll(regs);

		setup_adaptive_interrupts(vptr);

		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
		writew(vptr->options.numrx - 1, &regs->RDCSize);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		writew(vptr->options.numtx - 1, &regs->TDCSize);

		for (i = 0; i < vptr->tx.numq; i++) {
			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
			mac_tx_queue_run(regs, i);
		}

		init_flow_control_register(vptr);

		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

		mii_status = velocity_get_opt_media_mode(vptr);
		netif_stop_queue(vptr->dev);

		mii_init(vptr, mii_status);

		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);
		mac_hw_mibs_init(regs);
		mac_write_int_mask(vptr->int_mask, regs);
		mac_clear_isr(regs);

	}
}

static void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;
	int avail, dirty, unusable;

	/*
	 * RD number must be a multiple of 4 per the hardware spec
	 * (programming guide rev 1.20, p.13)
	 */
	if (vptr->rx.filled < 4)
		return;

	wmb();

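	/*
	 * Hand descriptors back to the NIC in multiples of four, holding
	 * the remainder back: e.g. with rx.filled == 10, two descriptors
	 * stay back (unusable) and eight are returned.
	 */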
	unusable = vptr->rx.filled & 0x0003;
	dirty = vptr->rx.dirty - unusable;
	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
	}

	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
	vptr->rx.filled = unusable;
}

/**
 *	velocity_init_dma_rings	-	set up DMA rings
 *	@vptr: Velocity to set up
 *
 *	Allocate PCI mapped DMA rings for the receive and transmit layer
 *	to use.
 */
static int velocity_init_dma_rings(struct velocity_info *vptr)
{
	struct velocity_opt *opt = &vptr->options;
	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
	struct pci_dev *pdev = vptr->pdev;
	dma_addr_t pool_dma;
	void *pool;
	unsigned int i;

	/*
	 * Allocate all RD/TD rings as a single pool.
	 *
	 * pci_alloc_consistent() fulfills the requirement for 64 byte
	 * alignment.
	 */
	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
				    rx_ring_size, &pool_dma);
	if (!pool) {
		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
			vptr->dev->name);
		return -ENOMEM;
	}

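	/*
	 * Carve the pool up: the RX ring sits at the start, followed by
	 * one TX ring per transmit queue.
	 */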
	vptr->rx.ring = pool;
	vptr->rx.pool_dma = pool_dma;

	pool += rx_ring_size;
	pool_dma += rx_ring_size;

	for (i = 0; i < vptr->tx.numq; i++) {
		vptr->tx.rings[i] = pool;
		vptr->tx.pool_dma[i] = pool_dma;
		pool += tx_ring_size;
		pool_dma += tx_ring_size;
	}

	return 0;
}

static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
{
	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
}

/**
 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
 *	@vptr: velocity
 *	@idx: ring index
 *
 *	Allocate a new full sized buffer for the reception of a frame and
 *	map it into PCI space for the hardware to use. The hardware
 *	requires *64* byte alignment of the buffer which makes life
 *	less fun than would be ideal.
 */
static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
{
	struct rx_desc *rd = &(vptr->rx.ring[idx]);
	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);

	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
	if (rd_info->skb == NULL)
		return -ENOMEM;

	/*
	 *	Do the gymnastics to get the buffer head for data at
	 *	64byte alignment.
	 */
	skb_reserve(rd_info->skb,
			64 - ((unsigned long) rd_info->skb->data & 63));
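	/*
	 * e.g. if skb->data & 63 == 40, 24 bytes are reserved and the
	 * data pointer lands on the next 64 byte boundary; an already
	 * aligned buffer is bumped by a full 64 bytes, which is why 64
	 * extra bytes were allocated above.
	 */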
1524	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1525					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1526
1527	/*
1528	 *	Fill in the descriptor to match
1529	 */
1530
1531	*((u32 *) & (rd->rdesc0)) = 0;
1532	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1533	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1534	rd->pa_high = 0;
1535	return 0;
1536}
1537
1538
1539static int velocity_rx_refill(struct velocity_info *vptr)
1540{
1541	int dirty = vptr->rx.dirty, done = 0;
1542
1543	do {
1544		struct rx_desc *rd = vptr->rx.ring + dirty;
1545
1546		/* Fine for an all zero Rx desc at init time as well */
1547		if (rd->rdesc0.len & OWNED_BY_NIC)
1548			break;
1549
1550		if (!vptr->rx.info[dirty].skb) {
1551			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1552				break;
1553		}
1554		done++;
1555		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1556	} while (dirty != vptr->rx.curr);
1557
1558	if (done) {
1559		vptr->rx.dirty = dirty;
1560		vptr->rx.filled += done;
1561	}
1562
1563	return done;
1564}
1565
1566/**
1567 *	velocity_free_rd_ring	-	free receive ring
1568 *	@vptr: velocity to clean up
1569 *
1570 *	Free the receive buffers for each ring slot and any
1571 *	attached socket buffers that need to go away.
1572 */
1573static void velocity_free_rd_ring(struct velocity_info *vptr)
1574{
1575	int i;
1576
1577	if (vptr->rx.info == NULL)
1578		return;
1579
1580	for (i = 0; i < vptr->options.numrx; i++) {
1581		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1582		struct rx_desc *rd = vptr->rx.ring + i;
1583
1584		memset(rd, 0, sizeof(*rd));
1585
1586		if (!rd_info->skb)
1587			continue;
1588		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1589				 PCI_DMA_FROMDEVICE);
1590		rd_info->skb_dma = 0;
1591
1592		dev_kfree_skb(rd_info->skb);
1593		rd_info->skb = NULL;
1594	}
1595
1596	kfree(vptr->rx.info);
1597	vptr->rx.info = NULL;
1598}
1599
1600
1601
1602/**
1603 *	velocity_init_rd_ring	-	set up receive ring
1604 *	@vptr: velocity to configure
1605 *
1606 *	Allocate and set up the receive buffers for each ring slot and
1607 *	assign them to the network adapter.
1608 */
1609static int velocity_init_rd_ring(struct velocity_info *vptr)
1610{
1611	int ret = -ENOMEM;
1612
1613	vptr->rx.info = kcalloc(vptr->options.numrx,
1614				sizeof(struct velocity_rd_info), GFP_KERNEL);
1615	if (!vptr->rx.info)
1616		goto out;
1617
1618	velocity_init_rx_ring_indexes(vptr);
1619
1620	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1621		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1622			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
1623		velocity_free_rd_ring(vptr);
1624		goto out;
1625	}
1626
1627	ret = 0;
1628out:
1629	return ret;
1630}
1631
1632/**
1633 *	velocity_init_td_ring	-	set up transmit ring
1634 *	@vptr:	velocity
1635 *
1636 *	Set up the transmit ring and chain the ring pointers together.
1637 *	Returns zero on success or a negative posix errno code for
1638 *	failure.
1639 */
1640static int velocity_init_td_ring(struct velocity_info *vptr)
1641{
1642	int j;
1643
1644	/* Init the TD ring entries */
1645	for (j = 0; j < vptr->tx.numq; j++) {
1646
1647		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1648					    sizeof(struct velocity_td_info),
1649					    GFP_KERNEL);
1650		if (!vptr->tx.infos[j])	{
1651			while (--j >= 0)
1652				kfree(vptr->tx.infos[j]);
1653			return -ENOMEM;
1654		}
1655
1656		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1657	}
1658	return 0;
1659}
1660
1661/**
1662 *	velocity_free_dma_rings	-	free PCI ring pointers
1663 *	@vptr: Velocity to free from
1664 *
1665 *	Clean up the PCI ring buffers allocated to this velocity.
1666 */
1667static void velocity_free_dma_rings(struct velocity_info *vptr)
1668{
1669	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1670		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1671
1672	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1673}
1674
1675
1676static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1677{
1678	int ret;
1679
1680	velocity_set_rxbufsize(vptr, mtu);
1681
1682	ret = velocity_init_dma_rings(vptr);
1683	if (ret < 0)
1684		goto out;
1685
1686	ret = velocity_init_rd_ring(vptr);
1687	if (ret < 0)
1688		goto err_free_dma_rings_0;
1689
1690	ret = velocity_init_td_ring(vptr);
1691	if (ret < 0)
1692		goto err_free_rd_ring_1;
1693out:
1694	return ret;
1695
1696err_free_rd_ring_1:
1697	velocity_free_rd_ring(vptr);
1698err_free_dma_rings_0:
1699	velocity_free_dma_rings(vptr);
1700	goto out;
1701}
1702
1703/**
1704 *	velocity_free_tx_buf	-	free transmit buffer
1705 *	@vptr: velocity
1706 *	@tdinfo: buffer
1707 *
1708 *	Release an transmit buffer. If the buffer was preallocated then
1709 *	recycle it, if not then unmap the buffer.
1710 */
1711static void velocity_free_tx_buf(struct velocity_info *vptr,
1712		struct velocity_td_info *tdinfo, struct tx_desc *td)
1713{
1714	struct sk_buff *skb = tdinfo->skb;
1715
1716	/*
1717	 *	Don't unmap the pre-allocated tx_bufs
1718	 */
1719	if (tdinfo->skb_dma) {
1720		int i;
1721
1722		for (i = 0; i < tdinfo->nskb_dma; i++) {
1723			size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1724
1725			/* For scatter-gather */
1726			if (skb_shinfo(skb)->nr_frags > 0)
1727				pktlen = max_t(size_t, pktlen,
1728						td->td_buf[i].size & ~TD_QUEUE);
1729
1730			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1731					le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
1732		}
1733	}
1734	dev_kfree_skb_irq(skb);
1735	tdinfo->skb = NULL;
1736}
1737
1738
1739/*
1740 *	FIXME: could we merge this with velocity_free_tx_buf ?
1741 */
1742static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1743							 int q, int n)
1744{
1745	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1746	int i;
1747
1748	if (td_info == NULL)
1749		return;
1750
1751	if (td_info->skb) {
1752		for (i = 0; i < td_info->nskb_dma; i++) {
1753			if (td_info->skb_dma[i]) {
1754				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1755					td_info->skb->len, PCI_DMA_TODEVICE);
1756				td_info->skb_dma[i] = 0;
1757			}
1758		}
1759		dev_kfree_skb(td_info->skb);
1760		td_info->skb = NULL;
1761	}
1762}
1763
1764/**
1765 *	velocity_free_td_ring	-	free td ring
1766 *	@vptr: velocity
1767 *
1768 *	Free up the transmit ring for this particular velocity adapter.
1769 *	We free the ring contents but not the ring itself.
1770 */
1771static void velocity_free_td_ring(struct velocity_info *vptr)
1772{
1773	int i, j;
1774
1775	for (j = 0; j < vptr->tx.numq; j++) {
1776		if (vptr->tx.infos[j] == NULL)
1777			continue;
1778		for (i = 0; i < vptr->options.numtx; i++)
1779			velocity_free_td_ring_entry(vptr, j, i);
1780
1781		kfree(vptr->tx.infos[j]);
1782		vptr->tx.infos[j] = NULL;
1783	}
1784}
1785
1786
1787static void velocity_free_rings(struct velocity_info *vptr)
1788{
1789	velocity_free_td_ring(vptr);
1790	velocity_free_rd_ring(vptr);
1791	velocity_free_dma_rings(vptr);
1792}
1793
1794/**
1795 *	velocity_error	-	handle error from controller
1796 *	@vptr: velocity
1797 *	@status: card status
1798 *
1799 *	Process an error report from the hardware and attempt to recover
1800 *	the card itself. At the moment we cannot recover from some
1801 *	theoretically impossible errors but this could be fixed using
1802 *	the pci_device_failed logic to bounce the hardware
1803 *
1804 */
1805static void velocity_error(struct velocity_info *vptr, int status)
1806{
1807
1808	if (status & ISR_TXSTLI) {
1809		struct mac_regs __iomem *regs = vptr->mac_regs;
1810
1811		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1812		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1813		writew(TRDCSR_RUN, &regs->TDCSRClr);
1814		netif_stop_queue(vptr->dev);
1815
1816		/* FIXME: port over the pci_device_failed code and use it
1817		   here */
1818	}
1819
1820	if (status & ISR_SRCI) {
1821		struct mac_regs __iomem *regs = vptr->mac_regs;
1822		int linked;
1823
1824		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1825			vptr->mii_status = check_connection_type(regs);
1826
1827			/*
1828			 *	If it is a 3119, disable frame bursting in
1829			 *	halfduplex mode and enable it in fullduplex
1830			 *	 mode
1831			 */
1832			if (vptr->rev_id < REV_ID_VT3216_A0) {
1833				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1834					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1835				else
1836					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1837			}
1838			/*
1839			 *	Only enable CD heart beat counter in 10HD mode
1840			 */
1841			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1842				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1843			else
1844				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1845
1846			setup_queue_timers(vptr);
1847		}
1848		/*
1849		 *	Get link status from PHYSR0
1850		 */
1851		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1852
1853		if (linked) {
1854			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1855			netif_carrier_on(vptr->dev);
1856		} else {
1857			vptr->mii_status |= VELOCITY_LINK_FAIL;
1858			netif_carrier_off(vptr->dev);
1859		}
1860
1861		velocity_print_link_status(vptr);
1862		enable_flow_control_ability(vptr);
1863
1864		/*
1865		 *	Re-enable auto-polling because SRCI will disable
1866		 *	auto-polling
1867		 */
1868
1869		enable_mii_autopoll(regs);
1870
1871		if (vptr->mii_status & VELOCITY_LINK_FAIL)
1872			netif_stop_queue(vptr->dev);
1873		else
1874			netif_wake_queue(vptr->dev);
1875
1876	}
1877	if (status & ISR_MIBFI)
1878		velocity_update_hw_mibs(vptr);
1879	if (status & ISR_LSTEI)
1880		mac_rx_queue_wake(vptr->mac_regs);
1881}
1882
1883/**
 *	velocity_tx_srv		-	transmit interrupt service
 *	@vptr: velocity
 *
 *	Scan the queues looking for transmitted packets that
 *	we can complete and clean up. Update any statistics as
 *	necessary.
1890 */
1891static int velocity_tx_srv(struct velocity_info *vptr)
1892{
1893	struct tx_desc *td;
1894	int qnum;
1895	int full = 0;
1896	int idx;
1897	int works = 0;
1898	struct velocity_td_info *tdinfo;
1899	struct net_device_stats *stats = &vptr->dev->stats;
1900
1901	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1902		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1903			idx = (idx + 1) % vptr->options.numtx) {
1904
1905			/*
1906			 *	Get Tx Descriptor
1907			 */
1908			td = &(vptr->tx.rings[qnum][idx]);
1909			tdinfo = &(vptr->tx.infos[qnum][idx]);
1910
1911			if (td->tdesc0.len & OWNED_BY_NIC)
1912				break;
1913
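			/* Process at most 16 descriptors per pass so each
			   service pass under the lock stays short */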
			if (works++ > 15)
				break;
1916
1917			if (td->tdesc0.TSR & TSR0_TERR) {
1918				stats->tx_errors++;
1919				stats->tx_dropped++;
1920				if (td->tdesc0.TSR & TSR0_CDH)
1921					stats->tx_heartbeat_errors++;
1922				if (td->tdesc0.TSR & TSR0_CRS)
1923					stats->tx_carrier_errors++;
1924				if (td->tdesc0.TSR & TSR0_ABT)
1925					stats->tx_aborted_errors++;
1926				if (td->tdesc0.TSR & TSR0_OWC)
1927					stats->tx_window_errors++;
1928			} else {
1929				stats->tx_packets++;
1930				stats->tx_bytes += tdinfo->skb->len;
1931			}
1932			velocity_free_tx_buf(vptr, tdinfo, td);
1933			vptr->tx.used[qnum]--;
1934		}
1935		vptr->tx.tail[qnum] = idx;
1936
1937		if (AVAIL_TD(vptr, qnum) < 1)
1938			full = 1;
1939	}
1940	/*
1941	 *	Look to see if we should kick the transmit network
1942	 *	layer for more work.
1943	 */
1944	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1945	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1946		netif_wake_queue(vptr->dev);
1947	}
1948	return works;
1949}
1950
1951/**
1952 *	velocity_rx_csum	-	checksum process
1953 *	@rd: receive packet descriptor
1954 *	@skb: network layer packet buffer
1955 *
1956 *	Process the status bits for the received packet and determine
1957 *	if the checksum was computed and verified by the hardware
1958 */
1959static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1960{
1961	skb_checksum_none_assert(skb);
1962
1963	if (rd->rdesc1.CSM & CSM_IPKT) {
1964		if (rd->rdesc1.CSM & CSM_IPOK) {
1965			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1966					(rd->rdesc1.CSM & CSM_UDPKT)) {
1967				if (!(rd->rdesc1.CSM & CSM_TUPOK))
1968					return;
1969			}
1970			skb->ip_summed = CHECKSUM_UNNECESSARY;
1971		}
1972	}
1973}
1974
1975/**
1976 *	velocity_rx_copy	-	in place Rx copy for small packets
1977 *	@rx_skb: network layer packet buffer candidate
1978 *	@pkt_size: received data size
1979 *	@rd: receive packet descriptor
1980 *	@dev: network device
1981 *
1982 *	Replace the current skb that is scheduled for Rx processing by a
1983 *	shorter, immediately allocated skb, if the received packet is small
1984 *	enough. This function returns a negative value if the received
1985 *	packet is too big or if memory is exhausted.
1986 */
1987static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1988			    struct velocity_info *vptr)
1989{
1990	int ret = -1;
1991	if (pkt_size < rx_copybreak) {
1992		struct sk_buff *new_skb;
1993
1994		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1995		if (new_skb) {
1996			new_skb->ip_summed = rx_skb[0]->ip_summed;
1997			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1998			*rx_skb = new_skb;
1999			ret = 0;
2000		}
2001
2002	}
2003	return ret;
2004}
2005
2006/**
2007 *	velocity_iph_realign	-	IP header alignment
2008 *	@vptr: velocity we are handling
2009 *	@skb: network layer packet buffer
2010 *	@pkt_size: received data size
2011 *
 *	Align the IP header on a 2 byte boundary. This behavior can be
2013 *	configured by the user.
2014 */
2015static inline void velocity_iph_realign(struct velocity_info *vptr,
2016					struct sk_buff *skb, int pkt_size)
2017{
2018	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2019		memmove(skb->data + 2, skb->data, pkt_size);
2020		skb_reserve(skb, 2);
2021	}
2022}
2023
2024
2025/**
2026 *	velocity_receive_frame	-	received packet processor
2027 *	@vptr: velocity we are handling
2028 *	@idx: ring index
2029 *
2030 *	A packet has arrived. We process the packet and if appropriate
2031 *	pass the frame up the network stack
2032 */
2033static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2034{
2035	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2036	struct net_device_stats *stats = &vptr->dev->stats;
2037	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2038	struct rx_desc *rd = &(vptr->rx.ring[idx]);
2039	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2040	struct sk_buff *skb;
2041
2042	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2044		stats->rx_length_errors++;
2045		return -EINVAL;
2046	}
2047
2048	if (rd->rdesc0.RSR & RSR_MAR)
2049		stats->multicast++;
2050
2051	skb = rd_info->skb;
2052
2053	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2054				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2055
2056	/*
2057	 *	Drop frame not meeting IEEE 802.3
2058	 */
2059
2060	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2061		if (rd->rdesc0.RSR & RSR_RL) {
2062			stats->rx_length_errors++;
2063			return -EINVAL;
2064		}
2065	}
2066
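	/*
	 *	Default to syncing the buffer back to the device; if the
	 *	skb cannot be copied below and is passed up directly, this
	 *	is switched to a full unmap instead.
	 */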
2067	pci_action = pci_dma_sync_single_for_device;
2068
2069	velocity_rx_csum(rd, skb);
2070
2071	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2072		velocity_iph_realign(vptr, skb, pkt_len);
2073		pci_action = pci_unmap_single;
2074		rd_info->skb = NULL;
2075	}
2076
2077	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2078		   PCI_DMA_FROMDEVICE);
2079
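	/* The reported length includes the 4 byte frame checksum */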
2080	skb_put(skb, pkt_len - 4);
2081	skb->protocol = eth_type_trans(skb, vptr->dev);
2082
2083	if (rd->rdesc0.RSR & RSR_DETAG) {
2084		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2085
2086		__vlan_hwaccel_put_tag(skb, vid);
2087	}
2088	netif_rx(skb);
2089
2090	stats->rx_bytes += pkt_len;
2091
2092	return 0;
2093}
2094
2095
2096/**
2097 *	velocity_rx_srv		-	service RX interrupt
2098 *	@vptr: velocity
2099 *
2100 *	Walk the receive ring of the velocity adapter and remove
2101 *	any received packets from the receive queue. Hand the ring
2102 *	slots back to the adapter for reuse.
2103 */
2104static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2105{
2106	struct net_device_stats *stats = &vptr->dev->stats;
2107	int rd_curr = vptr->rx.curr;
2108	int works = 0;
2109
2110	while (works < budget_left) {
2111		struct rx_desc *rd = vptr->rx.ring + rd_curr;
2112
2113		if (!vptr->rx.info[rd_curr].skb)
2114			break;
2115
2116		if (rd->rdesc0.len & OWNED_BY_NIC)
2117			break;
2118
2119		rmb();
2120
2121		/*
		 *	Don't drop frames with CE or RL errors even though RXOK is off
2123		 */
2124		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2125			if (velocity_receive_frame(vptr, rd_curr) < 0)
2126				stats->rx_dropped++;
2127		} else {
2128			if (rd->rdesc0.RSR & RSR_CRC)
2129				stats->rx_crc_errors++;
2130			if (rd->rdesc0.RSR & RSR_FAE)
2131				stats->rx_frame_errors++;
2132
2133			stats->rx_dropped++;
2134		}
2135
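		/* Re-arm the per-descriptor interrupt enable before the
		   slot is refilled and handed back to the NIC */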
2136		rd->size |= RX_INTEN;
2137
2138		rd_curr++;
2139		if (rd_curr >= vptr->options.numrx)
2140			rd_curr = 0;
2141		works++;
2142	}
2143
2144	vptr->rx.curr = rd_curr;
2145
2146	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2147		velocity_give_many_rx_descs(vptr);
2148
2149	VAR_USED(stats);
2150	return works;
2151}
2152
2153static int velocity_poll(struct napi_struct *napi, int budget)
2154{
2155	struct velocity_info *vptr = container_of(napi,
2156			struct velocity_info, napi);
2157	unsigned int rx_done;
2158	unsigned long flags;
2159
2160	spin_lock_irqsave(&vptr->lock, flags);
2161	/*
2162	 * Do rx and tx twice for performance (taken from the VIA
2163	 * out-of-tree driver).
2164	 */
2165	rx_done = velocity_rx_srv(vptr, budget / 2);
2166	velocity_tx_srv(vptr);
2167	rx_done += velocity_rx_srv(vptr, budget - rx_done);
2168	velocity_tx_srv(vptr);
2169
2170	/* If budget not fully consumed, exit the polling mode */
2171	if (rx_done < budget) {
2172		napi_complete(napi);
2173		mac_enable_int(vptr->mac_regs);
2174	}
2175	spin_unlock_irqrestore(&vptr->lock, flags);
2176
2177	return rx_done;
2178}
2179
2180/**
2181 *	velocity_intr		-	interrupt callback
2182 *	@irq: interrupt number
2183 *	@dev_instance: interrupting device
2184 *
2185 *	Called whenever an interrupt is generated by the velocity
2186 *	adapter IRQ line. We may not be the source of the interrupt
 *	adapter IRQ line. The line is shared, so we may not be the
 *	source of the interrupt; identify that first and, if not us,
 *	exit as efficiently as possible.
2190static irqreturn_t velocity_intr(int irq, void *dev_instance)
2191{
2192	struct net_device *dev = dev_instance;
2193	struct velocity_info *vptr = netdev_priv(dev);
2194	u32 isr_status;
2195
2196	spin_lock(&vptr->lock);
2197	isr_status = mac_read_isr(vptr->mac_regs);
2198
2199	/* Not us ? */
2200	if (isr_status == 0) {
2201		spin_unlock(&vptr->lock);
2202		return IRQ_NONE;
2203	}
2204
2205	/* Ack the interrupt */
2206	mac_write_isr(vptr->mac_regs, isr_status);
2207
2208	if (likely(napi_schedule_prep(&vptr->napi))) {
2209		mac_disable_int(vptr->mac_regs);
2210		__napi_schedule(&vptr->napi);
2211	}
2212
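	/* Only the rx/tx work is deferred to the NAPI poll; error, MIB
	   and link change events are handled here directly */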
2213	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2214		velocity_error(vptr, isr_status);
2215
2216	spin_unlock(&vptr->lock);
2217
2218	return IRQ_HANDLED;
2219}
2220
2221/**
2222 *	velocity_open		-	interface activation callback
2223 *	@dev: network layer device to open
2224 *
2225 *	Called when the network layer brings the interface up. Returns
2226 *	a negative posix error code on failure, or zero on success.
2227 *
2228 *	All the ring allocation and set up is done on open for this
2229 *	adapter to minimise memory usage when inactive
2230 */
2231static int velocity_open(struct net_device *dev)
2232{
2233	struct velocity_info *vptr = netdev_priv(dev);
2234	int ret;
2235
2236	ret = velocity_init_rings(vptr, dev->mtu);
2237	if (ret < 0)
2238		goto out;
2239
2240	/* Ensure chip is running */
2241	pci_set_power_state(vptr->pdev, PCI_D0);
2242
2243	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2244
2245	ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2246			  dev->name, dev);
2247	if (ret < 0) {
2248		/* Power down the chip */
2249		pci_set_power_state(vptr->pdev, PCI_D3hot);
2250		velocity_free_rings(vptr);
2251		goto out;
2252	}
2253
2254	velocity_give_many_rx_descs(vptr);
2255
2256	mac_enable_int(vptr->mac_regs);
2257	netif_start_queue(dev);
2258	napi_enable(&vptr->napi);
2259	vptr->flags |= VELOCITY_FLAGS_OPENED;
2260out:
2261	return ret;
2262}
2263
2264/**
2265 *	velocity_shutdown	-	shut down the chip
2266 *	@vptr: velocity to deactivate
2267 *
2268 *	Shuts down the internal operations of the velocity and
2269 *	disables interrupts, autopolling, transmit and receive
2270 */
2271static void velocity_shutdown(struct velocity_info *vptr)
2272{
2273	struct mac_regs __iomem *regs = vptr->mac_regs;
2274	mac_disable_int(regs);
2275	writel(CR0_STOP, &regs->CR0Set);
2276	writew(0xFFFF, &regs->TDCSRClr);
2277	writeb(0xFF, &regs->RDCSRClr);
2278	safe_disable_mii_autopoll(regs);
2279	mac_clear_isr(regs);
2280}
2281
2282/**
2283 *	velocity_change_mtu	-	MTU change callback
2284 *	@dev: network device
2285 *	@new_mtu: desired MTU
2286 *
2287 *	Handle requests from the networking layer for MTU change on
2288 *	this interface. It gets called on a change by the network layer.
2289 *	Return zero for success or negative posix error code.
2290 */
2291static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2292{
2293	struct velocity_info *vptr = netdev_priv(dev);
2294	int ret = 0;
2295
	if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
2297		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2298				vptr->dev->name);
2299		ret = -EINVAL;
2300		goto out_0;
2301	}
2302
2303	if (!netif_running(dev)) {
2304		dev->mtu = new_mtu;
2305		goto out_0;
2306	}
2307
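	/*
	 *	Build replacement rings sized for the new MTU in a scratch
	 *	velocity_info, swap them in under the lock, then free the
	 *	old rings through the scratch structure. This keeps the
	 *	interface down time to the swap itself.
	 */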
2308	if (dev->mtu != new_mtu) {
2309		struct velocity_info *tmp_vptr;
2310		unsigned long flags;
2311		struct rx_info rx;
2312		struct tx_info tx;
2313
2314		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2315		if (!tmp_vptr) {
2316			ret = -ENOMEM;
2317			goto out_0;
2318		}
2319
2320		tmp_vptr->dev = dev;
2321		tmp_vptr->pdev = vptr->pdev;
2322		tmp_vptr->options = vptr->options;
2323		tmp_vptr->tx.numq = vptr->tx.numq;
2324
2325		ret = velocity_init_rings(tmp_vptr, new_mtu);
2326		if (ret < 0)
2327			goto out_free_tmp_vptr_1;
2328
2329		spin_lock_irqsave(&vptr->lock, flags);
2330
2331		netif_stop_queue(dev);
2332		velocity_shutdown(vptr);
2333
2334		rx = vptr->rx;
2335		tx = vptr->tx;
2336
2337		vptr->rx = tmp_vptr->rx;
2338		vptr->tx = tmp_vptr->tx;
2339
2340		tmp_vptr->rx = rx;
2341		tmp_vptr->tx = tx;
2342
2343		dev->mtu = new_mtu;
2344
2345		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2346
2347		velocity_give_many_rx_descs(vptr);
2348
2349		mac_enable_int(vptr->mac_regs);
2350		netif_start_queue(dev);
2351
2352		spin_unlock_irqrestore(&vptr->lock, flags);
2353
2354		velocity_free_rings(tmp_vptr);
2355
2356out_free_tmp_vptr_1:
2357		kfree(tmp_vptr);
2358	}
2359out_0:
2360	return ret;
2361}
2362
2363/**
2364 *	velocity_mii_ioctl		-	MII ioctl handler
2365 *	@dev: network device
2366 *	@ifr: the ifreq block for the ioctl
2367 *	@cmd: the command
2368 *
2369 *	Process MII requests made via ioctl from the network layer. These
2370 *	are used by tools like kudzu to interrogate the link state of the
2371 *	hardware
2372 */
2373static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2374{
2375	struct velocity_info *vptr = netdev_priv(dev);
2376	struct mac_regs __iomem *regs = vptr->mac_regs;
2377	unsigned long flags;
2378	struct mii_ioctl_data *miidata = if_mii(ifr);
2379	int err;
2380
2381	switch (cmd) {
2382	case SIOCGMIIPHY:
2383		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2384		break;
2385	case SIOCGMIIREG:
2386		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2387			return -ETIMEDOUT;
2388		break;
2389	case SIOCSMIIREG:
2390		spin_lock_irqsave(&vptr->lock, flags);
2391		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2392		spin_unlock_irqrestore(&vptr->lock, flags);
2393		check_connection_type(vptr->mac_regs);
2394		if (err)
2395			return err;
2396		break;
2397	default:
2398		return -EOPNOTSUPP;
2399	}
2400	return 0;
2401}
2402
2403
2404/**
2405 *	velocity_ioctl		-	ioctl entry point
2406 *	@dev: network device
2407 *	@rq: interface request ioctl
2408 *	@cmd: command code
2409 *
2410 *	Called when the user issues an ioctl request to the network
2411 *	device in question. The velocity interface supports MII.
2412 */
2413static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2414{
2415	struct velocity_info *vptr = netdev_priv(dev);
2416	int ret;
2417
2418	/* If we are asked for information and the device is power
2419	   saving then we need to bring the device back up to talk to it */
2420
2421	if (!netif_running(dev))
2422		pci_set_power_state(vptr->pdev, PCI_D0);
2423
2424	switch (cmd) {
2425	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
2426	case SIOCGMIIREG:	/* Read MII PHY register. */
2427	case SIOCSMIIREG:	/* Write to MII PHY register. */
2428		ret = velocity_mii_ioctl(dev, rq, cmd);
2429		break;
2430
2431	default:
2432		ret = -EOPNOTSUPP;
2433	}
2434	if (!netif_running(dev))
2435		pci_set_power_state(vptr->pdev, PCI_D3hot);
2436
2437
2438	return ret;
2439}
2440
2441/**
 *	velocity_get_stats	-	statistics callback
2443 *	@dev: network device
2444 *
2445 *	Callback from the network layer to allow driver statistics
2446 *	to be resynchronized with hardware collected state. In the
2447 *	case of the velocity we need to pull the MIB counters from
2448 *	the hardware into the counters before letting the network
2449 *	layer display them.
2450 */
2451static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2452{
2453	struct velocity_info *vptr = netdev_priv(dev);
2454
2455	/* If the hardware is down, don't touch MII */
2456	if (!netif_running(dev))
2457		return &dev->stats;
2458
2459	spin_lock_irq(&vptr->lock);
2460	velocity_update_hw_mibs(vptr);
2461	spin_unlock_irq(&vptr->lock);
2462
2463	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2464	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2465	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2466
	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];

	/*
	 *	The remaining counters (rx_dropped, rx_over_errors,
	 *	rx_frame_errors, rx_fifo_errors, rx_missed_errors and the
	 *	detailed tx error counts) have no hardware MIB equivalent
	 *	and keep the values accumulated at interrupt time.
	 */
2479
2480	return &dev->stats;
2481}
2482
2483/**
2484 *	velocity_close		-	close adapter callback
2485 *	@dev: network device
2486 *
2487 *	Callback from the network layer when the velocity is being
 *	deactivated.
2489 */
2490static int velocity_close(struct net_device *dev)
2491{
2492	struct velocity_info *vptr = netdev_priv(dev);
2493
2494	napi_disable(&vptr->napi);
2495	netif_stop_queue(dev);
2496	velocity_shutdown(vptr);
2497
2498	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2499		velocity_get_ip(vptr);
2500	if (dev->irq != 0)
2501		free_irq(dev->irq, dev);
2502
2503	/* Power down the chip */
2504	pci_set_power_state(vptr->pdev, PCI_D3hot);
2505
2506	velocity_free_rings(vptr);
2507
2508	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2509	return 0;
2510}
2511
2512/**
2513 *	velocity_xmit		-	transmit packet callback
2514 *	@skb: buffer to transmit
2515 *	@dev: network device
2516 *
 *	Called by the network layer to request that a packet is queued
 *	to the velocity. Returns zero on success.
2519 */
2520static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2521				 struct net_device *dev)
2522{
2523	struct velocity_info *vptr = netdev_priv(dev);
2524	int qnum = 0;
2525	struct tx_desc *td_ptr;
2526	struct velocity_td_info *tdinfo;
2527	unsigned long flags;
2528	int pktlen;
2529	int index, prev;
2530	int i = 0;
2531
2532	if (skb_padto(skb, ETH_ZLEN))
2533		goto out;
2534
2535	/* The hardware can handle at most 7 memory segments, so merge
2536	 * the skb if there are more */
2537	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2538		kfree_skb(skb);
2539		return NETDEV_TX_OK;
2540	}
2541
2542	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2543			max_t(unsigned int, skb->len, ETH_ZLEN) :
2544				skb_headlen(skb);
2545
2546	spin_lock_irqsave(&vptr->lock, flags);
2547
2548	index = vptr->tx.curr[qnum];
2549	td_ptr = &(vptr->tx.rings[qnum][index]);
2550	tdinfo = &(vptr->tx.infos[qnum][index]);
2551
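	/*
	 *	Request a completion interrupt (TIC) and clear TD_QUEUE so
	 *	this descriptor marks the current end of the chain; the
	 *	previous descriptor's TD_QUEUE bit is set further down,
	 *	once this one is fully built, to link it in.
	 */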
2552	td_ptr->tdesc1.TCR = TCR0_TIC;
2553	td_ptr->td_buf[0].size &= ~TD_QUEUE;
2554
2555	/*
2556	 *	Map the linear network buffer into PCI space and
2557	 *	add it to the transmit ring.
2558	 */
2559	tdinfo->skb = skb;
2560	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2561	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2562	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2563	td_ptr->td_buf[0].pa_high = 0;
2564	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2565
2566	/* Handle fragments */
2567	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2568		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2569
2570		tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
2571				frag->page_offset, frag->size,
2572				PCI_DMA_TODEVICE);
2573
2574		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2575		td_ptr->td_buf[i + 1].pa_high = 0;
2576		td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
2577	}
2578	tdinfo->nskb_dma = i + 1;
2579
2580	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2581
2582	if (vlan_tx_tag_present(skb)) {
2583		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2584		td_ptr->tdesc1.TCR |= TCR0_VETAG;
2585	}
2586
2587	/*
2588	 *	Handle hardware checksum
2589	 */
2590	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2591		const struct iphdr *ip = ip_hdr(skb);
2592		if (ip->protocol == IPPROTO_TCP)
2593			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2594		else if (ip->protocol == IPPROTO_UDP)
2595			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2596		td_ptr->tdesc1.TCR |= TCR0_IPCK;
2597	}
2598
2599	prev = index - 1;
2600	if (prev < 0)
2601		prev = vptr->options.numtx - 1;
2602	td_ptr->tdesc0.len |= OWNED_BY_NIC;
2603	vptr->tx.used[qnum]++;
2604	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2605
2606	if (AVAIL_TD(vptr, qnum) < 1)
2607		netif_stop_queue(dev);
2608
2609	td_ptr = &(vptr->tx.rings[qnum][prev]);
2610	td_ptr->td_buf[0].size |= TD_QUEUE;
2611	mac_tx_queue_wake(vptr->mac_regs, qnum);
2612
2613	spin_unlock_irqrestore(&vptr->lock, flags);
2614out:
2615	return NETDEV_TX_OK;
2616}
2617
2618
2619static const struct net_device_ops velocity_netdev_ops = {
2620	.ndo_open		= velocity_open,
2621	.ndo_stop		= velocity_close,
2622	.ndo_start_xmit		= velocity_xmit,
2623	.ndo_get_stats		= velocity_get_stats,
2624	.ndo_validate_addr	= eth_validate_addr,
2625	.ndo_set_mac_address 	= eth_mac_addr,
2626	.ndo_set_multicast_list	= velocity_set_multi,
2627	.ndo_change_mtu		= velocity_change_mtu,
2628	.ndo_do_ioctl		= velocity_ioctl,
2629	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
2630	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
2631};
2632
2633/**
2634 *	velocity_init_info	-	init private data
2635 *	@pdev: PCI device
2636 *	@vptr: Velocity info
2637 *	@info: Board type
2638 *
2639 *	Set up the initial velocity_info struct for the device that has been
2640 *	discovered.
2641 */
2642static void __devinit velocity_init_info(struct pci_dev *pdev,
2643					 struct velocity_info *vptr,
2644					 const struct velocity_info_tbl *info)
2645{
2646	memset(vptr, 0, sizeof(struct velocity_info));
2647
2648	vptr->pdev = pdev;
2649	vptr->chip_id = info->chip_id;
2650	vptr->tx.numq = info->txqueue;
2651	vptr->multicast_limit = MCAM_SIZE;
2652	spin_lock_init(&vptr->lock);
2653}
2654
2655/**
2656 *	velocity_get_pci_info	-	retrieve PCI info for device
2657 *	@vptr: velocity device
2658 *	@pdev: PCI device it matches
2659 *
2660 *	Retrieve the PCI configuration space data that interests us from
2661 *	the kernel PCI layer
2662 */
2663static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2664{
2665	vptr->rev_id = pdev->revision;
2666
2667	pci_set_master(pdev);
2668
2669	vptr->ioaddr = pci_resource_start(pdev, 0);
2670	vptr->memaddr = pci_resource_start(pdev, 1);
2671
2672	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2673		dev_err(&pdev->dev,
2674			   "region #0 is not an I/O resource, aborting.\n");
2675		return -EINVAL;
2676	}
2677
2678	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2679		dev_err(&pdev->dev,
2680			   "region #1 is an I/O resource, aborting.\n");
2681		return -EINVAL;
2682	}
2683
2684	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2685		dev_err(&pdev->dev, "region #1 is too small.\n");
2686		return -EINVAL;
2687	}
2688	vptr->pdev = pdev;
2689
2690	return 0;
2691}
2692
2693/**
 *	velocity_print_info	-	print adapter data
 *	@vptr: velocity
 *
 *	Print the per adapter identification data as the kernel driver
 *	finds Velocity hardware
2699 */
2700static void __devinit velocity_print_info(struct velocity_info *vptr)
2701{
2702	struct net_device *dev = vptr->dev;
2703
2704	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2705	printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2706		dev->name, dev->dev_addr);
2707}
2708
2709static u32 velocity_get_link(struct net_device *dev)
2710{
2711	struct velocity_info *vptr = netdev_priv(dev);
2712	struct mac_regs __iomem *regs = vptr->mac_regs;
2713	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2714}
2715
2716
2717/**
2718 *	velocity_found1		-	set up discovered velocity card
2719 *	@pdev: PCI device
2720 *	@ent: PCI device table entry that matched
2721 *
2722 *	Configure a discovered adapter from scratch. Return a negative
2723 *	errno error code on failure paths.
2724 */
2725static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
2726{
2727	static int first = 1;
2728	struct net_device *dev;
2729	int i;
2730	const char *drv_string;
2731	const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2732	struct velocity_info *vptr;
2733	struct mac_regs __iomem *regs;
2734	int ret = -ENOMEM;
2735
2736	/* FIXME: this driver, like almost all other ethernet drivers,
2737	 * can support more than MAX_UNITS.
2738	 */
2739	if (velocity_nics >= MAX_UNITS) {
2740		dev_notice(&pdev->dev, "already found %d NICs.\n",
2741			   velocity_nics);
2742		return -ENODEV;
2743	}
2744
2745	dev = alloc_etherdev(sizeof(struct velocity_info));
2746	if (!dev) {
2747		dev_err(&pdev->dev, "allocate net device failed.\n");
2748		goto out;
2749	}
2750
2751	/* Chain it all together */
2752
2753	SET_NETDEV_DEV(dev, &pdev->dev);
2754	vptr = netdev_priv(dev);
2755
2756
2757	if (first) {
2758		printk(KERN_INFO "%s Ver. %s\n",
2759			VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2760		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2761		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2762		first = 0;
2763	}
2764
2765	velocity_init_info(pdev, vptr, info);
2766
2767	vptr->dev = dev;
2768
2769	ret = pci_enable_device(pdev);
2770	if (ret < 0)
2771		goto err_free_dev;
2772
2773	dev->irq = pdev->irq;
2774
2775	ret = velocity_get_pci_info(vptr, pdev);
2776	if (ret < 0) {
2777		/* error message already printed */
2778		goto err_disable;
2779	}
2780
2781	ret = pci_request_regions(pdev, VELOCITY_NAME);
2782	if (ret < 0) {
2783		dev_err(&pdev->dev, "No PCI resources.\n");
2784		goto err_disable;
2785	}
2786
2787	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2788	if (regs == NULL) {
2789		ret = -EIO;
2790		goto err_release_res;
2791	}
2792
2793	vptr->mac_regs = regs;
2794
2795	mac_wol_reset(regs);
2796
2797	dev->base_addr = vptr->ioaddr;
2798
2799	for (i = 0; i < 6; i++)
2800		dev->dev_addr[i] = readb(&regs->PAR[i]);
2801
2802
2803	drv_string = dev_driver_string(&pdev->dev);
2804
2805	velocity_get_options(&vptr->options, velocity_nics, drv_string);
2806
2807	/*
	 *	Mask out the options that cannot be set on the chip
2809	 */
2810
2811	vptr->options.flags &= info->flags;
2812
2813	/*
	 *	Enable the chip-specific capabilities
2815	 */
2816
2817	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2818
2819	vptr->wol_opts = vptr->options.wol_opts;
2820	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2821
2822	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2823
2824	dev->irq = pdev->irq;
2825	dev->netdev_ops = &velocity_netdev_ops;
2826	dev->ethtool_ops = &velocity_ethtool_ops;
2827	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2828
2829	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
2830	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2831		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2832
2833	ret = register_netdev(dev);
2834	if (ret < 0)
2835		goto err_iounmap;
2836
2837	if (!velocity_get_link(dev)) {
2838		netif_carrier_off(dev);
2839		vptr->mii_status |= VELOCITY_LINK_FAIL;
2840	}
2841
2842	velocity_print_info(vptr);
2843	pci_set_drvdata(pdev, dev);
2844
2845	/* and leave the chip powered down */
2846
2847	pci_set_power_state(pdev, PCI_D3hot);
2848	velocity_nics++;
2849out:
2850	return ret;
2851
2852err_iounmap:
2853	iounmap(regs);
2854err_release_res:
2855	pci_release_regions(pdev);
2856err_disable:
2857	pci_disable_device(pdev);
2858err_free_dev:
2859	free_netdev(dev);
2860	goto out;
2861}
2862
2863
2864#ifdef CONFIG_PM
2865/**
 *	wol_calc_crc		-	WOL CRC
 *	@size: size of the mask in bytes (each mask byte covers 8 pattern bytes)
 *	@pattern: data pattern
 *	@mask_pattern: mask
2869 *
2870 *	Compute the wake on lan crc hashes for the packet header
2871 *	we are interested in.
2872 */
2873static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2874{
2875	u16 crc = 0xFFFF;
2876	u8 mask;
2877	int i, j;
2878
2879	for (i = 0; i < size; i++) {
2880		mask = mask_pattern[i];
2881
2882		/* Skip this loop if the mask equals to zero */
2883		if (mask == 0x00)
2884			continue;
2885
2886		for (j = 0; j < 8; j++) {
2887			if ((mask & 0x01) == 0) {
2888				mask >>= 1;
2889				continue;
2890			}
2891			mask >>= 1;
2892			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2893		}
2894	}
2895	/*	Finally, invert the result once to get the correct data */
2896	crc = ~crc;
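	/* bitrev32(x) >> 16 bit-reverses the low 16 bits, giving the
	   bit order the pattern CRC registers expect */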
2897	return bitrev32(crc) >> 16;
2898}
2899
2900/**
2901 *	velocity_set_wol	-	set up for wake on lan
2902 *	@vptr: velocity to set WOL status on
2903 *
2904 *	Set a card up for wake on lan either by unicast or by
2905 *	ARP packet.
2906 *
2907 *	FIXME: check static buffer is safe here
2908 */
2909static int velocity_set_wol(struct velocity_info *vptr)
2910{
2911	struct mac_regs __iomem *regs = vptr->mac_regs;
2912	enum speed_opt spd_dpx = vptr->options.spd_dpx;
2913	static u8 buf[256];
2914	int i;
2915
2916	static u32 mask_pattern[2][4] = {
2917		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
2918		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
2919	};
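	/*
	 *	Each mask bit covers one byte of the pattern buffer (cf.
	 *	wol_calc_crc). The ARP mask selects the ethertype (bytes
	 *	12-13), the opcode's low byte (21) and the target IP
	 *	address (bytes 38-41), matching the fields filled in below.
	 */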
2920
2921	writew(0xFFFF, &regs->WOLCRClr);
2922	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2923	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2924
2925	/*
2926	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
2927	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2928	 */
2929
2930	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2931		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2932
2933	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2934		struct arp_packet *arp = (struct arp_packet *) buf;
2935		u16 crc;
2936		memset(buf, 0, sizeof(struct arp_packet) + 7);
2937
2938		for (i = 0; i < 4; i++)
2939			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2940
2941		arp->type = htons(ETH_P_ARP);
2942		arp->ar_op = htons(1);
2943
2944		memcpy(arp->ar_tip, vptr->ip_addr, 4);
2945
2946		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2947				(u8 *) & mask_pattern[0][0]);
2948
2949		writew(crc, &regs->PatternCRC[0]);
2950		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2951	}
2952
2953	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2954	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2955
2956	writew(0x0FFF, &regs->WOLSRClr);
2957
2958	if (spd_dpx == SPD_DPX_1000_FULL)
2959		goto mac_done;
2960
2961	if (spd_dpx != SPD_DPX_AUTO)
2962		goto advertise_done;
2963
2964	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2965		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2966			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2967
2968		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2969	}
2970
2971	if (vptr->mii_status & VELOCITY_SPEED_1000)
2972		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2973
2974advertise_done:
2975	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2976
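	/* Force the MAC configuration: leave GMII and run full duplex
	   while the chip is in its low power state */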
2977	{
2978		u8 GCR;
2979		GCR = readb(&regs->CHIPGCR);
2980		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2981		writeb(GCR, &regs->CHIPGCR);
2982	}
2983
2984mac_done:
2985	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2986	/* Turn on SWPTAG just before entering power mode */
2987	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2988	/* Go to bed ..... */
2989	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2990
2991	return 0;
2992}
2993
2994/**
2995 *	velocity_save_context	-	save registers
2996 *	@vptr: velocity
2997 *	@context: buffer for stored context
2998 *
2999 *	Retrieve the current configuration from the velocity hardware
3000 *	and stash it in the context structure, for use by the context
3001 *	restore functions. This allows us to save things we need across
3002 *	power down states
3003 */
3004static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3005{
3006	struct mac_regs __iomem *regs = vptr->mac_regs;
3007	u16 i;
3008	u8 __iomem *ptr = (u8 __iomem *)regs;
3009
3010	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3011		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3012
3013	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3014		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3015
3016	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3017		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3018
3019}
3020
3021static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3022{
3023	struct net_device *dev = pci_get_drvdata(pdev);
3024	struct velocity_info *vptr = netdev_priv(dev);
3025	unsigned long flags;
3026
3027	if (!netif_running(vptr->dev))
3028		return 0;
3029
3030	netif_device_detach(vptr->dev);
3031
3032	spin_lock_irqsave(&vptr->lock, flags);
3033	pci_save_state(pdev);
3034#ifdef ETHTOOL_GWOL
3035	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3036		velocity_get_ip(vptr);
3037		velocity_save_context(vptr, &vptr->context);
3038		velocity_shutdown(vptr);
3039		velocity_set_wol(vptr);
3040		pci_enable_wake(pdev, PCI_D3hot, 1);
3041		pci_set_power_state(pdev, PCI_D3hot);
3042	} else {
3043		velocity_save_context(vptr, &vptr->context);
3044		velocity_shutdown(vptr);
3045		pci_disable_device(pdev);
3046		pci_set_power_state(pdev, pci_choose_state(pdev, state));
3047	}
3048#else
3049	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3050#endif
3051	spin_unlock_irqrestore(&vptr->lock, flags);
3052	return 0;
3053}
3054
3055/**
3056 *	velocity_restore_context	-	restore registers
3057 *	@vptr: velocity
3058 *	@context: buffer for stored context
3059 *
3060 *	Reload the register configuration from the velocity context
3061 *	created by velocity_save_context.
3062 */
3063static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3064{
3065	struct mac_regs __iomem *regs = vptr->mac_regs;
3066	int i;
3067	u8 __iomem *ptr = (u8 __iomem *)regs;
3068
3069	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3070		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3071
	/* Skip CR0 itself: each CRx has paired set (+0) and clear (+4)
	   registers, so first clear the bits that should be zero, then
	   set the saved ones */
3073	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3074		/* Clear */
3075		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3076		/* Set */
3077		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3078	}
3079
3080	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3081		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3082
3083	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3084		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3085
3086	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3087		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3088}
3089
3090static int velocity_resume(struct pci_dev *pdev)
3091{
3092	struct net_device *dev = pci_get_drvdata(pdev);
3093	struct velocity_info *vptr = netdev_priv(dev);
3094	unsigned long flags;
3095	int i;
3096
3097	if (!netif_running(vptr->dev))
3098		return 0;
3099
3100	pci_set_power_state(pdev, PCI_D0);
3101	pci_enable_wake(pdev, 0, 0);
3102	pci_restore_state(pdev);
3103
3104	mac_wol_reset(vptr->mac_regs);
3105
3106	spin_lock_irqsave(&vptr->lock, flags);
3107	velocity_restore_context(vptr, &vptr->context);
3108	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3109	mac_disable_int(vptr->mac_regs);
3110
3111	velocity_tx_srv(vptr);
3112
3113	for (i = 0; i < vptr->tx.numq; i++) {
3114		if (vptr->tx.used[i])
3115			mac_tx_queue_wake(vptr->mac_regs, i);
3116	}
3117
3118	mac_enable_int(vptr->mac_regs);
3119	spin_unlock_irqrestore(&vptr->lock, flags);
3120	netif_device_attach(vptr->dev);
3121
3122	return 0;
3123}
3124#endif
3125
3126/*
3127 *	Definition for our device driver. The PCI layer interface
3128 *	uses this to handle all our card discover and plugging
3129 */
3130static struct pci_driver velocity_driver = {
	.name		= VELOCITY_NAME,
	.id_table	= velocity_id_table,
	.probe		= velocity_found1,
	.remove		= __devexit_p(velocity_remove1),
#ifdef CONFIG_PM
	.suspend	= velocity_suspend,
	.resume		= velocity_resume,
#endif
3139};
3140
3141
3142/**
3143 *	velocity_ethtool_up	-	pre hook for ethtool
3144 *	@dev: network device
3145 *
3146 *	Called before an ethtool operation. We need to make sure the
3147 *	chip is out of D3 state before we poke at it.
3148 */
3149static int velocity_ethtool_up(struct net_device *dev)
3150{
3151	struct velocity_info *vptr = netdev_priv(dev);
3152	if (!netif_running(dev))
3153		pci_set_power_state(vptr->pdev, PCI_D0);
3154	return 0;
3155}
3156
3157/**
3158 *	velocity_ethtool_down	-	post hook for ethtool
3159 *	@dev: network device
3160 *
3161 *	Called after an ethtool operation. Restore the chip back to D3
3162 *	state if it isn't running.
3163 */
3164static void velocity_ethtool_down(struct net_device *dev)
3165{
3166	struct velocity_info *vptr = netdev_priv(dev);
3167	if (!netif_running(dev))
3168		pci_set_power_state(vptr->pdev, PCI_D3hot);
3169}
3170
3171static int velocity_get_settings(struct net_device *dev,
3172				 struct ethtool_cmd *cmd)
3173{
3174	struct velocity_info *vptr = netdev_priv(dev);
3175	struct mac_regs __iomem *regs = vptr->mac_regs;
3176	u32 status;
3177	status = check_connection_type(vptr->mac_regs);
3178
3179	cmd->supported = SUPPORTED_TP |
3180			SUPPORTED_Autoneg |
3181			SUPPORTED_10baseT_Half |
3182			SUPPORTED_10baseT_Full |
3183			SUPPORTED_100baseT_Half |
3184			SUPPORTED_100baseT_Full |
3185			SUPPORTED_1000baseT_Half |
3186			SUPPORTED_1000baseT_Full;
3187
3188	cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3189	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3190		cmd->advertising |=
3191			ADVERTISED_10baseT_Half |
3192			ADVERTISED_10baseT_Full |
3193			ADVERTISED_100baseT_Half |
3194			ADVERTISED_100baseT_Full |
3195			ADVERTISED_1000baseT_Half |
3196			ADVERTISED_1000baseT_Full;
3197	} else {
3198		switch (vptr->options.spd_dpx) {
3199		case SPD_DPX_1000_FULL:
3200			cmd->advertising |= ADVERTISED_1000baseT_Full;
3201			break;
3202		case SPD_DPX_100_HALF:
3203			cmd->advertising |= ADVERTISED_100baseT_Half;
3204			break;
3205		case SPD_DPX_100_FULL:
3206			cmd->advertising |= ADVERTISED_100baseT_Full;
3207			break;
3208		case SPD_DPX_10_HALF:
3209			cmd->advertising |= ADVERTISED_10baseT_Half;
3210			break;
3211		case SPD_DPX_10_FULL:
3212			cmd->advertising |= ADVERTISED_10baseT_Full;
3213			break;
3214		default:
3215			break;
3216		}
3217	}
3218
3219	if (status & VELOCITY_SPEED_1000)
3220		ethtool_cmd_speed_set(cmd, SPEED_1000);
3221	else if (status & VELOCITY_SPEED_100)
3222		ethtool_cmd_speed_set(cmd, SPEED_100);
3223	else
3224		ethtool_cmd_speed_set(cmd, SPEED_10);
3225
3226	cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3227	cmd->port = PORT_TP;
3228	cmd->transceiver = XCVR_INTERNAL;
3229	cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3230
3231	if (status & VELOCITY_DUPLEX_FULL)
3232		cmd->duplex = DUPLEX_FULL;
3233	else
3234		cmd->duplex = DUPLEX_HALF;
3235
3236	return 0;
3237}
3238
3239static int velocity_set_settings(struct net_device *dev,
3240				 struct ethtool_cmd *cmd)
3241{
3242	struct velocity_info *vptr = netdev_priv(dev);
3243	u32 speed = ethtool_cmd_speed(cmd);
3244	u32 curr_status;
3245	u32 new_status = 0;
3246	int ret = 0;
3247
3248	curr_status = check_connection_type(vptr->mac_regs);
3249	curr_status &= (~VELOCITY_LINK_FAIL);
3250
3251	new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3252	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3253	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3254	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3255	new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3256
3257	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3258	    (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3259		ret = -EINVAL;
3260	} else {
3261		enum speed_opt spd_dpx;
3262
3263		if (new_status & VELOCITY_AUTONEG_ENABLE)
3264			spd_dpx = SPD_DPX_AUTO;
3265		else if ((new_status & VELOCITY_SPEED_1000) &&
3266			 (new_status & VELOCITY_DUPLEX_FULL)) {
3267			spd_dpx = SPD_DPX_1000_FULL;
3268		} else if (new_status & VELOCITY_SPEED_100)
3269			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3270				SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3271		else if (new_status & VELOCITY_SPEED_10)
3272			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3273				SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3274		else
3275			return -EOPNOTSUPP;
3276
3277		vptr->options.spd_dpx = spd_dpx;
3278
3279		velocity_set_media_mode(vptr, new_status);
3280	}
3281
3282	return ret;
3283}
3284
3285static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3286{
3287	struct velocity_info *vptr = netdev_priv(dev);
3288	strcpy(info->driver, VELOCITY_NAME);
3289	strcpy(info->version, VELOCITY_VERSION);
3290	strcpy(info->bus_info, pci_name(vptr->pdev));
3291}
3292
3293static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3294{
3295	struct velocity_info *vptr = netdev_priv(dev);
3296	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3297	wol->wolopts |= WAKE_MAGIC;
3298	/*
3299	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3300		   wol.wolopts|=WAKE_PHY;
3301			 */
3302	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3303		wol->wolopts |= WAKE_UCAST;
3304	if (vptr->wol_opts & VELOCITY_WOL_ARP)
3305		wol->wolopts |= WAKE_ARP;
3306	memcpy(&wol->sopass, vptr->wol_passwd, 6);
3307}
3308
3309static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3310{
3311	struct velocity_info *vptr = netdev_priv(dev);
3312
3313	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3314		return -EFAULT;
3315	vptr->wol_opts = VELOCITY_WOL_MAGIC;
3316
3317	/*
3318	   if (wol.wolopts & WAKE_PHY) {
3319	   vptr->wol_opts|=VELOCITY_WOL_PHY;
3320	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3321	   }
3322	 */
3323
3324	if (wol->wolopts & WAKE_MAGIC) {
3325		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3326		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3327	}
3328	if (wol->wolopts & WAKE_UCAST) {
3329		vptr->wol_opts |= VELOCITY_WOL_UCAST;
3330		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3331	}
3332	if (wol->wolopts & WAKE_ARP) {
3333		vptr->wol_opts |= VELOCITY_WOL_ARP;
3334		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3335	}
3336	memcpy(vptr->wol_passwd, wol->sopass, 6);
3337	return 0;
3338}
3339
3340static u32 velocity_get_msglevel(struct net_device *dev)
3341{
3342	return msglevel;
3343}
3344
3345static void velocity_set_msglevel(struct net_device *dev, u32 value)
3346{
3347	 msglevel = value;
3348}
3349
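/*
 *	The queue timer registers hold a two bit multiplier (x1/x4/x16/x64)
 *	in bits 7:6 and a six bit count in bits 5:0, for a range of 0 to
 *	0x3f * 64 microseconds. For example, 0x8a encodes a multiplier of
 *	16 (0x8a >> 6 == 2) and a count of 10 (0x8a & 0x3f), i.e. 160us.
 */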
3350static int get_pending_timer_val(int val)
3351{
3352	int mult_bits = val >> 6;
3353	int mult = 1;
3354
	switch (mult_bits) {
3357	case 1:
3358		mult = 4; break;
3359	case 2:
3360		mult = 16; break;
3361	case 3:
3362		mult = 64; break;
3363	case 0:
3364	default:
3365		break;
3366	}
3367
3368	return (val & 0x3f) * mult;
3369}
3370
3371static void set_pending_timer_val(int *val, u32 us)
3372{
3373	u8 mult = 0;
3374	u8 shift = 0;
3375
3376	if (us >= 0x3f) {
3377		mult = 1; /* mult with 4 */
3378		shift = 2;
3379	}
3380	if (us >= 0x3f * 4) {
3381		mult = 2; /* mult with 16 */
3382		shift = 4;
3383	}
3384	if (us >= 0x3f * 16) {
3385		mult = 3; /* mult with 64 */
3386		shift = 6;
3387	}
3388
3389	*val = (mult << 6) | ((us >> shift) & 0x3f);
3390}
3391
3392
3393static int velocity_get_coalesce(struct net_device *dev,
3394		struct ethtool_coalesce *ecmd)
3395{
3396	struct velocity_info *vptr = netdev_priv(dev);
3397
3398	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3399	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3400
3401	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3402	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3403
3404	return 0;
3405}
3406
3407static int velocity_set_coalesce(struct net_device *dev,
3408		struct ethtool_coalesce *ecmd)
3409{
3410	struct velocity_info *vptr = netdev_priv(dev);
3411	int max_us = 0x3f * 64;
3412	unsigned long flags;
3413
	/* 6 bits of timer count with a multiplier of at most 64 */
3415	if (ecmd->tx_coalesce_usecs > max_us)
3416		return -EINVAL;
3417	if (ecmd->rx_coalesce_usecs > max_us)
3418		return -EINVAL;
3419
3420	if (ecmd->tx_max_coalesced_frames > 0xff)
3421		return -EINVAL;
3422	if (ecmd->rx_max_coalesced_frames > 0xff)
3423		return -EINVAL;
3424
3425	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3426	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3427
3428	set_pending_timer_val(&vptr->options.rxqueue_timer,
3429			ecmd->rx_coalesce_usecs);
3430	set_pending_timer_val(&vptr->options.txqueue_timer,
3431			ecmd->tx_coalesce_usecs);
3432
3433	/* Setup the interrupt suppression and queue timers */
3434	spin_lock_irqsave(&vptr->lock, flags);
3435	mac_disable_int(vptr->mac_regs);
3436	setup_adaptive_interrupts(vptr);
3437	setup_queue_timers(vptr);
3438
3439	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3440	mac_clear_isr(vptr->mac_regs);
3441	mac_enable_int(vptr->mac_regs);
3442	spin_unlock_irqrestore(&vptr->lock, flags);
3443
3444	return 0;
3445}
3446
3447static const struct ethtool_ops velocity_ethtool_ops = {
3448	.get_settings	=	velocity_get_settings,
3449	.set_settings	=	velocity_set_settings,
3450	.get_drvinfo	=	velocity_get_drvinfo,
3451	.get_wol	=	velocity_ethtool_get_wol,
3452	.set_wol	=	velocity_ethtool_set_wol,
3453	.get_msglevel	=	velocity_get_msglevel,
3454	.set_msglevel	=	velocity_set_msglevel,
3455	.get_link	=	velocity_get_link,
3456	.get_coalesce	=	velocity_get_coalesce,
3457	.set_coalesce	=	velocity_set_coalesce,
3458	.begin		=	velocity_ethtool_up,
3459	.complete	=	velocity_ethtool_down
3460};
3461
3462#ifdef CONFIG_PM
3463#ifdef CONFIG_INET
3464static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3465{
3466	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3467	struct net_device *dev = ifa->ifa_dev->dev;
3468
3469	if (dev_net(dev) == &init_net &&
3470	    dev->netdev_ops == &velocity_netdev_ops)
3471		velocity_get_ip(netdev_priv(dev));
3472
3473	return NOTIFY_DONE;
3474}
3475#endif	/* CONFIG_INET */
3476#endif	/* CONFIG_PM */
3477
3478#if defined(CONFIG_PM) && defined(CONFIG_INET)
3479static struct notifier_block velocity_inetaddr_notifier = {
	.notifier_call	= velocity_netdev_event,
3481};
3482
3483static void velocity_register_notifier(void)
3484{
3485	register_inetaddr_notifier(&velocity_inetaddr_notifier);
3486}
3487
3488static void velocity_unregister_notifier(void)
3489{
3490	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3491}
3492
3493#else
3494
3495#define velocity_register_notifier()	do {} while (0)
3496#define velocity_unregister_notifier()	do {} while (0)
3497
3498#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
3499
3500/**
3501 *	velocity_init_module	-	load time function
3502 *
3503 *	Called when the velocity module is loaded. The PCI driver
3504 *	is registered with the PCI layer, and in turn will call
3505 *	the probe functions for each velocity adapter installed
3506 *	in the system.
3507 */
3508static int __init velocity_init_module(void)
3509{
3510	int ret;
3511
3512	velocity_register_notifier();
3513	ret = pci_register_driver(&velocity_driver);
3514	if (ret < 0)
3515		velocity_unregister_notifier();
3516	return ret;
3517}
3518
3519/**
 *	velocity_cleanup_module	-	module unload
 *
 *	Called when the velocity module is unloaded. It unregisters
 *	the notifiers and the PCI driver interface for this hardware,
 *	which in turn cleans up all discovered interfaces before
 *	returning.
3526 */
3527static void __exit velocity_cleanup_module(void)
3528{
3529	velocity_unregister_notifier();
3530	pci_unregister_driver(&velocity_driver);
3531}
3532
3533module_init(velocity_init_module);
3534module_exit(velocity_cleanup_module);