   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * This code is derived from the VIA reference driver (copyright message
   4 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
   5 * addition to the Linux kernel.
   6 *
   7 * The code has been merged into one source file, cleaned up to follow
   8 * Linux coding style,  ported to the Linux 2.6 kernel tree and cleaned
   9 * for 64bit hardware platforms.
  10 *
  11 * TODO
  12 *	rx_copybreak/alignment
  13 *	More testing
  14 *
  15 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
  16 * Additional fixes and clean up: Francois Romieu
  17 *
  18 * This source has not been verified for use in safety critical systems.
  19 *
  20 * Please direct queries about the revamped driver to the linux-kernel
  21 * list not VIA.
  22 *
  23 * Original code:
  24 *
  25 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
  26 * All rights reserved.
  27 *
  28 * Author: Chuang Liang-Shing, AJ Jiang
  29 *
  30 * Date: Jan 24, 2003
  31 *
  32 * MODULE_LICENSE("GPL");
  33 */
  34
  35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  36
  37#include <linux/module.h>
  38#include <linux/types.h>
  39#include <linux/bitops.h>
  40#include <linux/init.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/mm.h>
  43#include <linux/errno.h>
  44#include <linux/ioport.h>
  45#include <linux/pci.h>
  46#include <linux/kernel.h>
  47#include <linux/netdevice.h>
  48#include <linux/etherdevice.h>
  49#include <linux/skbuff.h>
  50#include <linux/delay.h>
  51#include <linux/timer.h>
  52#include <linux/slab.h>
  53#include <linux/interrupt.h>
  54#include <linux/string.h>
  55#include <linux/wait.h>
  56#include <linux/io.h>
  57#include <linux/if.h>
  58#include <linux/uaccess.h>
  59#include <linux/proc_fs.h>
  60#include <linux/of.h>
  61#include <linux/of_address.h>
  62#include <linux/of_irq.h>
  63#include <linux/inetdevice.h>
  64#include <linux/platform_device.h>
  65#include <linux/reboot.h>
  66#include <linux/ethtool.h>
  67#include <linux/mii.h>
  68#include <linux/in.h>
  69#include <linux/if_arp.h>
  70#include <linux/if_vlan.h>
  71#include <linux/ip.h>
  72#include <linux/tcp.h>
  73#include <linux/udp.h>
  74#include <linux/crc-ccitt.h>
  75#include <linux/crc32.h>
  76
  77#include "via-velocity.h"
  78
  79enum velocity_bus_type {
  80	BUS_PCI,
  81	BUS_PLATFORM,
  82};
  83
  84static int velocity_nics;
  85
  86static void velocity_set_power_state(struct velocity_info *vptr, char state)
  87{
  88	void *addr = vptr->mac_regs;
  89
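     	/* PCI devices go through the PCI core; platform devices write the
     	 * requested power state straight into the MAC register block instead. */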
  90	if (vptr->pdev)
  91		pci_set_power_state(vptr->pdev, state);
  92	else
  93		writeb(state, addr + 0x154);
  94}
  95
  96/**
  97 *	mac_get_cam_mask	-	Read a CAM mask
  98 *	@regs: register block for this velocity
  99 *	@mask: buffer to store mask
 100 *
 101 *	Fetch the mask bits of the selected CAM and store them into the
 102 *	provided mask buffer.
 103 */
 104static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 105{
 106	int i;
 107
 108	/* Select CAM mask */
 109	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 110
 111	writeb(0, &regs->CAMADDR);
 112
 113	/* read mask */
 114	for (i = 0; i < 8; i++)
 115		*mask++ = readb(&(regs->MARCAM[i]));
 116
 117	/* disable CAMEN */
 118	writeb(0, &regs->CAMADDR);
 119
 120	/* Select mar */
 121	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 122}
 123
 124/**
 125 *	mac_set_cam_mask	-	Set a CAM mask
 126 *	@regs: register block for this velocity
 127 *	@mask: CAM mask to load
 128 *
 129 *	Store a new mask into a CAM
 130 */
 131static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 132{
 133	int i;
 134	/* Select CAM mask */
 135	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 136
 137	writeb(CAMADDR_CAMEN, &regs->CAMADDR);
 138
 139	for (i = 0; i < 8; i++)
 140		writeb(*mask++, &(regs->MARCAM[i]));
 141
 142	/* disable CAMEN */
 143	writeb(0, &regs->CAMADDR);
 144
 145	/* Select mar */
 146	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 147}
 148
 149static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
 150{
 151	int i;
 152	/* Select CAM mask */
 153	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 154
 155	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
 156
 157	for (i = 0; i < 8; i++)
 158		writeb(*mask++, &(regs->MARCAM[i]));
 159
 160	/* disable CAMEN */
 161	writeb(0, &regs->CAMADDR);
 162
 163	/* Select mar */
 164	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 165}
 166
 167/**
 168 *	mac_set_cam	-	set CAM data
 169 *	@regs: register block of this velocity
 170 *	@idx: Cam index
 171 *	@addr: 2 or 6 bytes of CAM data
 172 *
 173 *	Load an address or vlan tag into a CAM
 174 */
 175static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
 176{
 177	int i;
 178
 179	/* Select CAM mask */
 180	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 181
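     	/* the CAM holds 64 entries, so keep the index in range */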
 182	idx &= (64 - 1);
 183
 184	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
 185
 186	for (i = 0; i < 6; i++)
 187		writeb(*addr++, &(regs->MARCAM[i]));
 188
 189	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
 190
 191	udelay(10);
 192
 193	writeb(0, &regs->CAMADDR);
 194
 195	/* Select mar */
 196	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 197}
 198
 199static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
 200			     const u8 *addr)
 201{
 202
 203	/* Select CAM mask */
 204	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 205
 206	idx &= (64 - 1);
 207
 208	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
 209	writew(*((u16 *) addr), &regs->MARCAM[0]);
 210
 211	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
 212
 213	udelay(10);
 214
 215	writeb(0, &regs->CAMADDR);
 216
 217	/* Select mar */
 218	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
 219}
 220
 221
 222/**
 223 *	mac_wol_reset	-	reset WOL after exiting low power
 224 *	@regs: register block of this velocity
 225 *
 226 *	Called after we drop out of wake on lan mode in order to
 227 *	reset the Wake on lan features. This function doesn't restore
 228 *	the rest of the logic from the result of sleep/wakeup
 229 */
 230static void mac_wol_reset(struct mac_regs __iomem *regs)
 231{
 232
 233	/* Turn off SWPTAG right after leaving power mode */
 234	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
 235	/* clear sticky bits */
 236	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
 237
 238	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
 239	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
 240	/* disable force PME-enable */
 241	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
 242	/* disable power-event config bit */
 243	writew(0xFFFF, &regs->WOLCRClr);
 244	/* clear power status */
 245	writew(0xFFFF, &regs->WOLSRClr);
 246}
 247
 248static const struct ethtool_ops velocity_ethtool_ops;
 249
 250/*
 251    Define module options
 252*/
 253
 254MODULE_AUTHOR("VIA Networking Technologies, Inc.");
 255MODULE_LICENSE("GPL");
 256MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
 257
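     /* Declare a per-adapter module parameter array (one slot per possible unit)
        together with its description string. */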
 258#define VELOCITY_PARAM(N, D) \
 259	static int N[MAX_UNITS] = OPTION_DEFAULT;\
 260	module_param_array(N, int, NULL, 0); \
 261	MODULE_PARM_DESC(N, D);
 262
 263#define RX_DESC_MIN     64
 264#define RX_DESC_MAX     255
 265#define RX_DESC_DEF     64
 266VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
 267
 268#define TX_DESC_MIN     16
 269#define TX_DESC_MAX     256
 270#define TX_DESC_DEF     64
 271VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
 272
 273#define RX_THRESH_MIN   0
 274#define RX_THRESH_MAX   3
 275#define RX_THRESH_DEF   0
 276/* rx_thresh[] is used for controlling the receive fifo threshold.
  277   0: indicates the rxfifo threshold is 128 bytes.
  278   1: indicates the rxfifo threshold is 512 bytes.
  279   2: indicates the rxfifo threshold is 1024 bytes.
  280   3: indicates the rxfifo threshold is store & forward.
 281*/
 282VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
 283
 284#define DMA_LENGTH_MIN  0
 285#define DMA_LENGTH_MAX  7
 286#define DMA_LENGTH_DEF  6
 287
 288/* DMA_length[] is used for controlling the DMA length
 289   0: 8 DWORDs
 290   1: 16 DWORDs
 291   2: 32 DWORDs
 292   3: 64 DWORDs
 293   4: 128 DWORDs
 294   5: 256 DWORDs
  295   6: SF(flush till empty)
  296   7: SF(flush till empty)
 297*/
 298VELOCITY_PARAM(DMA_length, "DMA length");
 299
 300#define IP_ALIG_DEF     0
  301/* IP_byte_align[] controls DWORD alignment of the IP header
  302   0: the IP header will not be DWORD aligned. (Default)
  303   1: the IP header will be DWORD aligned.
  304      In some environments the IP header must be DWORD aligned,
  305      or the packet will be dropped when we receive it. (e.g. IPVS)
  306*/
 307VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
 308
 309#define FLOW_CNTL_DEF   1
 310#define FLOW_CNTL_MIN   1
 311#define FLOW_CNTL_MAX   5
 312
  313/* flow_control[] is used for setting the flow control ability of the NIC.
  314   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
 315   2: enable TX flow control.
 316   3: enable RX flow control.
 317   4: enable RX/TX flow control.
 318   5: disable
 319*/
 320VELOCITY_PARAM(flow_control, "Enable flow control ability");
 321
 322#define MED_LNK_DEF 0
 323#define MED_LNK_MIN 0
 324#define MED_LNK_MAX 5
  325/* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
  326   0: indicates autonegotiation for both speed and duplex mode
  327   1: indicates 100Mbps half duplex mode
  328   2: indicates 100Mbps full duplex mode
  329   3: indicates 10Mbps half duplex mode
  330   4: indicates 10Mbps full duplex mode
  331   5: indicates 1000Mbps full duplex mode
 332
 333   Note:
  334   if the EEPROM has been set to a forced mode, this option is ignored
  335   by the driver.
 336*/
 337VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
 338
 339#define WOL_OPT_DEF     0
 340#define WOL_OPT_MIN     0
 341#define WOL_OPT_MAX     7
 342/* wol_opts[] is used for controlling wake on lan behavior.
  343   0: Wake up if a magic packet is received. (Default)
  344   1: Wake up if link status is on/off.
  345   2: Wake up if an arp packet is received.
  346   4: Wake up if any unicast packet is received.
  347   These values can be summed up to support more than one option.
 348*/
 349VELOCITY_PARAM(wol_opts, "Wake On Lan options");
 350
 351static int rx_copybreak = 200;
 352module_param(rx_copybreak, int, 0644);
 353MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 354
 355/*
 356 *	Internal board variants. At the moment we have only one
 357 */
 358static struct velocity_info_tbl chip_info_table[] = {
 359	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
 360	{ }
 361};
 362
 363/*
 364 *	Describe the PCI device identifiers that we support in this
 365 *	device driver. Used for hotplug autoloading.
 366 */
 367
 368static const struct pci_device_id velocity_pci_id_table[] = {
 369	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
 370	{ }
 371};
 372
 373MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
 374
 375/*
 376 *	Describe the OF device identifiers that we support in this
 377 *	device driver. Used for devicetree nodes.
 378 */
 379static const struct of_device_id velocity_of_ids[] = {
 380	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
 381	{ /* Sentinel */ },
 382};
 383MODULE_DEVICE_TABLE(of, velocity_of_ids);
 384
 385/**
 386 *	get_chip_name	- 	identifier to name
 387 *	@chip_id: chip identifier
 388 *
 389 *	Given a chip identifier return a suitable description. Returns
  390 *	a pointer to a static string valid while the driver is loaded.
 391 */
 392static const char *get_chip_name(enum chip_type chip_id)
 393{
 394	int i;
 395	for (i = 0; chip_info_table[i].name != NULL; i++)
 396		if (chip_info_table[i].chip_id == chip_id)
 397			break;
 398	return chip_info_table[i].name;
 399}
 400
 401/**
 402 *	velocity_set_int_opt	-	parser for integer options
 403 *	@opt: pointer to option value
 404 *	@val: value the user requested (or -1 for default)
 405 *	@min: lowest value allowed
 406 *	@max: highest value allowed
 407 *	@def: default value
 408 *	@name: property name
 409 *
 410 *	Set an integer property in the module options. This function does
 411 *	all the verification and checking as well as reporting so that
 412 *	we don't duplicate code for each option.
 413 */
 414static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
 415				 char *name)
 416{
 417	if (val == -1)
 418		*opt = def;
 419	else if (val < min || val > max) {
 420		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
 421			  name, min, max);
 422		*opt = def;
 423	} else {
 424		pr_info("set value of parameter %s to %d\n", name, val);
 425		*opt = val;
 426	}
 427}
 428
 429/**
 430 *	velocity_set_bool_opt	-	parser for boolean options
 431 *	@opt: pointer to option value
 432 *	@val: value the user requested (or -1 for default)
 433 *	@def: default value (yes/no)
 434 *	@flag: numeric value to set for true.
 435 *	@name: property name
 436 *
 437 *	Set a boolean property in the module options. This function does
 438 *	all the verification and checking as well as reporting so that
 439 *	we don't duplicate code for each option.
 440 */
 441static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
 442				  char *name)
 443{
 444	(*opt) &= (~flag);
 445	if (val == -1)
 446		*opt |= (def ? flag : 0);
 447	else if (val < 0 || val > 1) {
 448		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
 449			  name, 0, 1);
 450		*opt |= (def ? flag : 0);
 451	} else {
 452		pr_info("set parameter %s to %s\n",
 453			name, val ? "TRUE" : "FALSE");
 454		*opt |= (val ? flag : 0);
 455	}
 456}
 457
 458/**
 459 *	velocity_get_options	-	set options on device
 460 *	@opts: option structure for the device
 461 *	@index: index of option to use in module options array
 462 *
 463 *	Turn the module and command options into a single structure
 464 *	for the current device
 465 */
 466static void velocity_get_options(struct velocity_opt *opts, int index)
 467{
 468
 469	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
 470			     RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
 471			     "rx_thresh");
 472	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
 473			     DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
 474			     "DMA_length");
 475	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
 476			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
 477			     "RxDescriptors");
 478	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
 479			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
 480			     "TxDescriptors");
 481
 482	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
 483			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
 484			     "flow_control");
 485	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
 486			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
 487			      "IP_byte_align");
 488	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
 489			     MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
 490			     "Media link mode");
 491	velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
 492			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
 493			     "Wake On Lan options");
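     	/* round the RX descriptor count down to a multiple of four; descriptors
     	 * are handed back to the NIC in blocks of four (see
     	 * velocity_give_many_rx_descs) */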
 494	opts->numrx = (opts->numrx & ~3);
 495}
 496
 497/**
 498 *	velocity_init_cam_filter	-	initialise CAM
 499 *	@vptr: velocity to program
 500 *
 501 *	Initialize the content addressable memory used for filters. Load
 502 *	appropriately according to the presence of VLAN
 503 */
 504static void velocity_init_cam_filter(struct velocity_info *vptr)
 505{
 506	struct mac_regs __iomem *regs = vptr->mac_regs;
 507	unsigned int vid, i = 0;
 508
 509	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
 510	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
 511	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
 512
 513	/* Disable all CAMs */
 514	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
 515	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
 516	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 517	mac_set_cam_mask(regs, vptr->mCAMmask);
 518
 519	/* Enable VCAMs */
 520	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
 521		mac_set_vlan_cam(regs, i, (u8 *) &vid);
 522		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
 523		if (++i >= VCAM_SIZE)
 524			break;
 525	}
 526	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 527}
 528
 529static int velocity_vlan_rx_add_vid(struct net_device *dev,
 530				    __be16 proto, u16 vid)
 531{
 532	struct velocity_info *vptr = netdev_priv(dev);
 533
 534	spin_lock_irq(&vptr->lock);
 535	set_bit(vid, vptr->active_vlans);
 536	velocity_init_cam_filter(vptr);
 537	spin_unlock_irq(&vptr->lock);
 538	return 0;
 539}
 540
 541static int velocity_vlan_rx_kill_vid(struct net_device *dev,
 542				     __be16 proto, u16 vid)
 543{
 544	struct velocity_info *vptr = netdev_priv(dev);
 545
 546	spin_lock_irq(&vptr->lock);
 547	clear_bit(vid, vptr->active_vlans);
 548	velocity_init_cam_filter(vptr);
 549	spin_unlock_irq(&vptr->lock);
 550	return 0;
 551}
 552
 553static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
 554{
 555	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
 556}
 557
 558/**
 559 *	velocity_rx_reset	-	handle a receive reset
 560 *	@vptr: velocity we are resetting
 561 *
 562 *	Reset the ownership and status for the receive ring side.
  563 *	Hand the entire receive queue back to the NIC.
 564 */
 565static void velocity_rx_reset(struct velocity_info *vptr)
 566{
 567
 568	struct mac_regs __iomem *regs = vptr->mac_regs;
 569	int i;
 570
 571	velocity_init_rx_ring_indexes(vptr);
 572
 573	/*
 574	 *	Init state, all RD entries belong to the NIC
 575	 */
 576	for (i = 0; i < vptr->options.numrx; ++i)
 577		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
 578
 579	writew(vptr->options.numrx, &regs->RBRDU);
 580	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 581	writew(0, &regs->RDIdx);
 582	writew(vptr->options.numrx - 1, &regs->RDCSize);
 583}
 584
 585/**
 586 *	velocity_get_opt_media_mode	-	get media selection
 587 *	@vptr: velocity adapter
 588 *
 589 *	Get the media mode stored in EEPROM or module options and load
 590 *	mii_status accordingly. The requested link state information
 591 *	is also returned.
 592 */
 593static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
 594{
 595	u32 status = 0;
 596
 597	switch (vptr->options.spd_dpx) {
 598	case SPD_DPX_AUTO:
 599		status = VELOCITY_AUTONEG_ENABLE;
 600		break;
 601	case SPD_DPX_100_FULL:
 602		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
 603		break;
 604	case SPD_DPX_10_FULL:
 605		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
 606		break;
 607	case SPD_DPX_100_HALF:
 608		status = VELOCITY_SPEED_100;
 609		break;
 610	case SPD_DPX_10_HALF:
 611		status = VELOCITY_SPEED_10;
 612		break;
 613	case SPD_DPX_1000_FULL:
 614		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
 615		break;
 616	}
 617	vptr->mii_status = status;
 618	return status;
 619}
 620
 621/**
 622 *	safe_disable_mii_autopoll	-	autopoll off
 623 *	@regs: velocity registers
 624 *
 625 *	Turn off the autopoll and wait for it to disable on the chip
 626 */
 627static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
 628{
 629	u16 ww;
 630
 631	/*  turn off MAUTO */
 632	writeb(0, &regs->MIICR);
 633	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 634		udelay(1);
 635		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 636			break;
 637	}
 638}
 639
 640/**
 641 *	enable_mii_autopoll	-	turn on autopolling
 642 *	@regs: velocity registers
 643 *
 644 *	Enable the MII link status autopoll feature on the Velocity
 645 *	hardware. Wait for it to enable.
 646 */
 647static void enable_mii_autopoll(struct mac_regs __iomem *regs)
 648{
 649	int ii;
 650
 651	writeb(0, &(regs->MIICR));
 652	writeb(MIIADR_SWMPL, &regs->MIIADR);
 653
 654	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
 655		udelay(1);
 656		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 657			break;
 658	}
 659
 660	writeb(MIICR_MAUTO, &regs->MIICR);
 661
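     	/* wait for autopolling to actually start (MIDLE clears) */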
 662	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
 663		udelay(1);
 664		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 665			break;
 666	}
 667
 668}
 669
 670/**
 671 *	velocity_mii_read	-	read MII data
 672 *	@regs: velocity registers
 673 *	@index: MII register index
 674 *	@data: buffer for received data
 675 *
 676 *	Perform a single read of an MII 16bit register. Returns zero
 677 *	on success or -ETIMEDOUT if the PHY did not respond.
 678 */
 679static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
 680{
 681	u16 ww;
 682
 683	/*
 684	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
 685	 */
 686	safe_disable_mii_autopoll(regs);
 687
 688	writeb(index, &regs->MIIADR);
 689
 690	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
 691
 692	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 693		if (!(readb(&regs->MIICR) & MIICR_RCMD))
 694			break;
 695	}
 696
 697	*data = readw(&regs->MIIDATA);
 698
 699	enable_mii_autopoll(regs);
 700	if (ww == W_MAX_TIMEOUT)
 701		return -ETIMEDOUT;
 702	return 0;
 703}
 704
 705/**
 706 *	mii_check_media_mode	-	check media state
 707 *	@regs: velocity registers
 708 *
 709 *	Check the current MII status and determine the link status
 710 *	accordingly
 711 */
 712static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
 713{
 714	u32 status = 0;
 715	u16 ANAR;
 716
 717	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
 718		status |= VELOCITY_LINK_FAIL;
 719
 720	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
 721		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
 722	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
 723		status |= (VELOCITY_SPEED_1000);
 724	else {
 725		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 726		if (ANAR & ADVERTISE_100FULL)
 727			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
 728		else if (ANAR & ADVERTISE_100HALF)
 729			status |= VELOCITY_SPEED_100;
 730		else if (ANAR & ADVERTISE_10FULL)
 731			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
 732		else
 733			status |= (VELOCITY_SPEED_10);
 734	}
 735
 736	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 737		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 738		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 739		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 740			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
 741				status |= VELOCITY_AUTONEG_ENABLE;
 742		}
 743	}
 744
 745	return status;
 746}
 747
 748/**
 749 *	velocity_mii_write	-	write MII data
 750 *	@regs: velocity registers
 751 *	@mii_addr: MII register index
 752 *	@data: 16bit data for the MII register
 753 *
 754 *	Perform a single write to an MII 16bit register. Returns zero
 755 *	on success or -ETIMEDOUT if the PHY did not respond.
 756 */
 757static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
 758{
 759	u16 ww;
 760
 761	/*
 762	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
 763	 */
 764	safe_disable_mii_autopoll(regs);
 765
 766	/* MII reg offset */
 767	writeb(mii_addr, &regs->MIIADR);
 768	/* set MII data */
 769	writew(data, &regs->MIIDATA);
 770
 771	/* turn on MIICR_WCMD */
 772	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
 773
 774	/* W_MAX_TIMEOUT is the timeout period */
 775	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 776		udelay(5);
 777		if (!(readb(&regs->MIICR) & MIICR_WCMD))
 778			break;
 779	}
 780	enable_mii_autopoll(regs);
 781
 782	if (ww == W_MAX_TIMEOUT)
 783		return -ETIMEDOUT;
 784	return 0;
 785}
 786
 787/**
 788 *	set_mii_flow_control	-	flow control setup
 789 *	@vptr: velocity interface
 790 *
 791 *	Set up the flow control on this interface according to
 792 *	the supplied user/eeprom options.
 793 */
 794static void set_mii_flow_control(struct velocity_info *vptr)
 795{
 796	/*Enable or Disable PAUSE in ANAR */
 797	switch (vptr->options.flow_cntl) {
 798	case FLOW_CNTL_TX:
 799		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 800		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 801		break;
 802
 803	case FLOW_CNTL_RX:
 804		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 805		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 806		break;
 807
 808	case FLOW_CNTL_TX_RX:
 809		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 810		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 811		break;
 812
 813	case FLOW_CNTL_DISABLE:
 814		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 815		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 816		break;
 817	default:
 818		break;
 819	}
 820}
 821
 822/**
 823 *	mii_set_auto_on		-	autonegotiate on
 824 *	@vptr: velocity
 825 *
  826 *	Enable autonegotiation on this interface
 827 */
 828static void mii_set_auto_on(struct velocity_info *vptr)
 829{
 830	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
 831		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
 832	else
 833		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
 834}
 835
 836static u32 check_connection_type(struct mac_regs __iomem *regs)
 837{
 838	u32 status = 0;
 839	u8 PHYSR0;
 840	u16 ANAR;
 841	PHYSR0 = readb(&regs->PHYSR0);
 842
 843	/*
 844	   if (!(PHYSR0 & PHYSR0_LINKGD))
 845	   status|=VELOCITY_LINK_FAIL;
 846	 */
 847
 848	if (PHYSR0 & PHYSR0_FDPX)
 849		status |= VELOCITY_DUPLEX_FULL;
 850
 851	if (PHYSR0 & PHYSR0_SPDG)
 852		status |= VELOCITY_SPEED_1000;
 853	else if (PHYSR0 & PHYSR0_SPD10)
 854		status |= VELOCITY_SPEED_10;
 855	else
 856		status |= VELOCITY_SPEED_100;
 857
 858	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 859		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 860		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 861		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 862			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
 863				status |= VELOCITY_AUTONEG_ENABLE;
 864		}
 865	}
 866
 867	return status;
 868}
 869
 870/**
 871 *	velocity_set_media_mode		-	set media mode
 872 *	@vptr: velocity adapter
 873 *	@mii_status: old MII link state
 874 *
  875 *	Check the media link state and configure the PHY flow control
  876 *	and the velocity hardware setup accordingly. In particular
 877 *	we need to set up CD polling and frame bursting.
 878 */
 879static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
 880{
 881	struct mac_regs __iomem *regs = vptr->mac_regs;
 882
 883	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
 884
 885	/* Set mii link status */
 886	set_mii_flow_control(vptr);
 887
 888	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
 889		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
 890
 891	/*
 892	 *	If connection type is AUTO
 893	 */
 894	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
 895		netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
 896		/* clear force MAC mode bit */
 897		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
 898		/* set duplex mode of MAC according to duplex mode of MII */
 899		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
 900		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
 901		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
 902
 903		/* enable AUTO-NEGO mode */
 904		mii_set_auto_on(vptr);
 905	} else {
 906		u16 CTRL1000;
 907		u16 ANAR;
 908		u8 CHIPGCR;
 909
 910		/*
 911		 * 1. if it's 3119, disable frame bursting in halfduplex mode
 912		 *    and enable it in fullduplex mode
 913		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
 914		 * 3. only enable CD heart beat counter in 10HD mode
 915		 */
 916
 917		/* set force MAC mode bit */
 918		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
 919
 920		CHIPGCR = readb(&regs->CHIPGCR);
 921
 922		if (mii_status & VELOCITY_SPEED_1000)
 923			CHIPGCR |= CHIPGCR_FCGMII;
 924		else
 925			CHIPGCR &= ~CHIPGCR_FCGMII;
 926
 927		if (mii_status & VELOCITY_DUPLEX_FULL) {
 928			CHIPGCR |= CHIPGCR_FCFDX;
 929			writeb(CHIPGCR, &regs->CHIPGCR);
 930			netdev_info(vptr->netdev,
 931				    "set Velocity to forced full mode\n");
 932			if (vptr->rev_id < REV_ID_VT3216_A0)
 933				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
 934		} else {
 935			CHIPGCR &= ~CHIPGCR_FCFDX;
 936			netdev_info(vptr->netdev,
 937				    "set Velocity to forced half mode\n");
 938			writeb(CHIPGCR, &regs->CHIPGCR);
 939			if (vptr->rev_id < REV_ID_VT3216_A0)
 940				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
 941		}
 942
 943		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
 944		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
 945		if ((mii_status & VELOCITY_SPEED_1000) &&
 946		    (mii_status & VELOCITY_DUPLEX_FULL)) {
 947			CTRL1000 |= ADVERTISE_1000FULL;
 948		}
 949		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
 950
 951		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
 952			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
 953		else
 954			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
 955
 956		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
 957		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
 958		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
 959		if (mii_status & VELOCITY_SPEED_100) {
 960			if (mii_status & VELOCITY_DUPLEX_FULL)
 961				ANAR |= ADVERTISE_100FULL;
 962			else
 963				ANAR |= ADVERTISE_100HALF;
 964		} else if (mii_status & VELOCITY_SPEED_10) {
 965			if (mii_status & VELOCITY_DUPLEX_FULL)
 966				ANAR |= ADVERTISE_10FULL;
 967			else
 968				ANAR |= ADVERTISE_10HALF;
 969		}
 970		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
 971		/* enable AUTO-NEGO mode */
 972		mii_set_auto_on(vptr);
 973		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
 974	}
 975	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
 976	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
 977	return VELOCITY_LINK_CHANGE;
 978}
 979
 980/**
 981 *	velocity_print_link_status	-	link status reporting
 982 *	@vptr: velocity to report on
 983 *
 984 *	Turn the link status of the velocity card into a kernel log
 985 *	description of the new link state, detailing speed and duplex
 986 *	status
 987 */
 988static void velocity_print_link_status(struct velocity_info *vptr)
 989{
 990	const char *link;
 991	const char *speed;
 992	const char *duplex;
 993
 994	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
 995		netdev_notice(vptr->netdev, "failed to detect cable link\n");
 996		return;
 997	}
 998
 999	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1000		link = "auto-negotiation";
1001
1002		if (vptr->mii_status & VELOCITY_SPEED_1000)
1003			speed = "1000";
1004		else if (vptr->mii_status & VELOCITY_SPEED_100)
1005			speed = "100";
1006		else
1007			speed = "10";
1008
1009		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1010			duplex = "full";
1011		else
1012			duplex = "half";
1013	} else {
1014		link = "forced";
1015
1016		switch (vptr->options.spd_dpx) {
1017		case SPD_DPX_1000_FULL:
1018			speed = "1000";
1019			duplex = "full";
1020			break;
1021		case SPD_DPX_100_HALF:
1022			speed = "100";
1023			duplex = "half";
1024			break;
1025		case SPD_DPX_100_FULL:
1026			speed = "100";
1027			duplex = "full";
1028			break;
1029		case SPD_DPX_10_HALF:
1030			speed = "10";
1031			duplex = "half";
1032			break;
1033		case SPD_DPX_10_FULL:
1034			speed = "10";
1035			duplex = "full";
1036			break;
1037		default:
1038			speed = "unknown";
1039			duplex = "unknown";
1040			break;
1041		}
1042	}
1043	netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
1044		      link, speed, duplex);
1045}
1046
1047/**
1048 *	enable_flow_control_ability	-	flow control
 1049 *	@vptr: velocity to configure
1050 *
1051 *	Set up flow control according to the flow control options
1052 *	determined by the eeprom/configuration.
1053 */
1054static void enable_flow_control_ability(struct velocity_info *vptr)
1055{
1056
1057	struct mac_regs __iomem *regs = vptr->mac_regs;
1058
1059	switch (vptr->options.flow_cntl) {
1060
1061	case FLOW_CNTL_DEFAULT:
1062		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1063			writel(CR0_FDXRFCEN, &regs->CR0Set);
1064		else
1065			writel(CR0_FDXRFCEN, &regs->CR0Clr);
1066
1067		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1068			writel(CR0_FDXTFCEN, &regs->CR0Set);
1069		else
1070			writel(CR0_FDXTFCEN, &regs->CR0Clr);
1071		break;
1072
1073	case FLOW_CNTL_TX:
1074		writel(CR0_FDXTFCEN, &regs->CR0Set);
1075		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1076		break;
1077
1078	case FLOW_CNTL_RX:
1079		writel(CR0_FDXRFCEN, &regs->CR0Set);
1080		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1081		break;
1082
1083	case FLOW_CNTL_TX_RX:
1084		writel(CR0_FDXTFCEN, &regs->CR0Set);
1085		writel(CR0_FDXRFCEN, &regs->CR0Set);
1086		break;
1087
1088	case FLOW_CNTL_DISABLE:
1089		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1090		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1091		break;
1092
1093	default:
1094		break;
1095	}
1096
1097}
1098
1099/**
1100 *	velocity_soft_reset	-	soft reset
1101 *	@vptr: velocity to reset
1102 *
1103 *	Kick off a soft reset of the velocity adapter and then poll
1104 *	until the reset sequence has completed before returning.
1105 */
1106static int velocity_soft_reset(struct velocity_info *vptr)
1107{
1108	struct mac_regs __iomem *regs = vptr->mac_regs;
1109	int i = 0;
1110
1111	writel(CR0_SFRST, &regs->CR0Set);
1112
1113	for (i = 0; i < W_MAX_TIMEOUT; i++) {
1114		udelay(5);
1115		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1116			break;
1117	}
1118
1119	if (i == W_MAX_TIMEOUT) {
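     		/* the polled soft reset did not complete in time: force it instead */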
1120		writel(CR0_FORSRST, &regs->CR0Set);
1121		/* FIXME: PCI POSTING */
1122		/* delay 2ms */
1123		mdelay(2);
1124	}
1125	return 0;
1126}
1127
1128/**
1129 *	velocity_set_multi	-	filter list change callback
1130 *	@dev: network device
1131 *
1132 *	Called by the network layer when the filter lists need to change
1133 *	for a velocity adapter. Reload the CAMs with the new address
1134 *	filter ruleset.
1135 */
1136static void velocity_set_multi(struct net_device *dev)
1137{
1138	struct velocity_info *vptr = netdev_priv(dev);
1139	struct mac_regs __iomem *regs = vptr->mac_regs;
1140	u8 rx_mode;
1141	int i;
1142	struct netdev_hw_addr *ha;
1143
1144	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1145		writel(0xffffffff, &regs->MARCAM[0]);
1146		writel(0xffffffff, &regs->MARCAM[4]);
1147		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1148	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1149		   (dev->flags & IFF_ALLMULTI)) {
1150		writel(0xffffffff, &regs->MARCAM[0]);
1151		writel(0xffffffff, &regs->MARCAM[4]);
1152		rx_mode = (RCR_AM | RCR_AB);
1153	} else {
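     		/* load the multicast list into the last multicast_limit CAM
     		 * entries and enable only those entries in the CAM mask */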
1154		int offset = MCAM_SIZE - vptr->multicast_limit;
1155		mac_get_cam_mask(regs, vptr->mCAMmask);
1156
1157		i = 0;
1158		netdev_for_each_mc_addr(ha, dev) {
1159			mac_set_cam(regs, i + offset, ha->addr);
1160			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1161			i++;
1162		}
1163
1164		mac_set_cam_mask(regs, vptr->mCAMmask);
1165		rx_mode = RCR_AM | RCR_AB | RCR_AP;
1166	}
1167	if (dev->mtu > 1500)
1168		rx_mode |= RCR_AL;
1169
1170	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1171
1172}
1173
1174/*
1175 * MII access , media link mode setting functions
1176 */
1177
1178/**
1179 *	mii_init	-	set up MII
1180 *	@vptr: velocity adapter
 1181 *	@mii_status: link status
1182 *
1183 *	Set up the PHY for the current link state.
1184 */
1185static void mii_init(struct velocity_info *vptr, u32 mii_status)
1186{
1187	u16 BMCR;
1188
1189	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1190	case PHYID_ICPLUS_IP101A:
1191		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
1192						MII_ADVERTISE, vptr->mac_regs);
1193		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1194			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
1195								vptr->mac_regs);
1196		else
1197			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
1198								vptr->mac_regs);
1199		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1200		break;
1201	case PHYID_CICADA_CS8201:
1202		/*
1203		 *	Reset to hardware default
1204		 */
1205		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1206		/*
 1207		 *	Turn on the ECHODIS bit in NWay-forced full mode and turn
 1208		 *	it off in NWay-forced half mode, for the NWay-forced vs.
 1209		 *	legacy-forced issue.
1210		 */
1211		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1212			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1213		else
1214			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1215		/*
1216		 *	Turn on Link/Activity LED enable bit for CIS8201
1217		 */
1218		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1219		break;
1220	case PHYID_VT3216_32BIT:
1221	case PHYID_VT3216_64BIT:
1222		/*
1223		 *	Reset to hardware default
1224		 */
1225		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1226		/*
 1227		 *	Turn on the ECHODIS bit in NWay-forced full mode and turn
 1228		 *	it off in NWay-forced half mode, for the NWay-forced vs.
 1229		 *	legacy-forced issue
1230		 */
1231		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1232			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1233		else
1234			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1235		break;
1236
1237	case PHYID_MARVELL_1000:
1238	case PHYID_MARVELL_1000S:
1239		/*
1240		 *	Assert CRS on Transmit
1241		 */
1242		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1243		/*
1244		 *	Reset to hardware default
1245		 */
1246		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1247		break;
1248	default:
1249		;
1250	}
1251	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1252	if (BMCR & BMCR_ISOLATE) {
1253		BMCR &= ~BMCR_ISOLATE;
1254		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1255	}
1256}
1257
1258/**
1259 * setup_queue_timers	-	Setup interrupt timers
1260 * @vptr: velocity adapter
1261 *
1262 * Setup interrupt frequency during suppression (timeout if the frame
1263 * count isn't filled).
1264 */
1265static void setup_queue_timers(struct velocity_info *vptr)
1266{
1267	/* Only for newer revisions */
1268	if (vptr->rev_id >= REV_ID_VT3216_A0) {
1269		u8 txqueue_timer = 0;
1270		u8 rxqueue_timer = 0;
1271
1272		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1273				VELOCITY_SPEED_100)) {
1274			txqueue_timer = vptr->options.txqueue_timer;
1275			rxqueue_timer = vptr->options.rxqueue_timer;
1276		}
1277
1278		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1279		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1280	}
1281}
1282
1283/**
1284 * setup_adaptive_interrupts  -  Setup interrupt suppression
1285 * @vptr: velocity adapter
1286 *
1287 * The velocity is able to suppress interrupt during high interrupt load.
1288 * This function turns on that feature.
1289 */
1290static void setup_adaptive_interrupts(struct velocity_info *vptr)
1291{
1292	struct mac_regs __iomem *regs = vptr->mac_regs;
1293	u16 tx_intsup = vptr->options.tx_intsup;
1294	u16 rx_intsup = vptr->options.rx_intsup;
1295
1296	/* Setup default interrupt mask (will be changed below) */
1297	vptr->int_mask = INT_MASK_DEF;
1298
1299	/* Set Tx Interrupt Suppression Threshold */
1300	writeb(CAMCR_PS0, &regs->CAMCR);
1301	if (tx_intsup != 0) {
1302		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1303				ISR_PTX2I | ISR_PTX3I);
1304		writew(tx_intsup, &regs->ISRCTL);
1305	} else
1306		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1307
1308	/* Set Rx Interrupt Suppression Threshold */
1309	writeb(CAMCR_PS1, &regs->CAMCR);
1310	if (rx_intsup != 0) {
1311		vptr->int_mask &= ~ISR_PRXI;
1312		writew(rx_intsup, &regs->ISRCTL);
1313	} else
1314		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1315
1316	/* Select page to interrupt hold timer */
1317	writeb(0, &regs->CAMCR);
1318}
1319
1320/**
1321 *	velocity_init_registers	-	initialise MAC registers
1322 *	@vptr: velocity to init
1323 *	@type: type of initialisation (hot or cold)
1324 *
1325 *	Initialise the MAC on a reset or on first set up on the
1326 *	hardware.
1327 */
1328static void velocity_init_registers(struct velocity_info *vptr,
1329				    enum velocity_init_type type)
1330{
1331	struct mac_regs __iomem *regs = vptr->mac_regs;
1332	struct net_device *netdev = vptr->netdev;
1333	int i, mii_status;
1334
1335	mac_wol_reset(regs);
1336
1337	switch (type) {
1338	case VELOCITY_INIT_RESET:
1339	case VELOCITY_INIT_WOL:
1340
1341		netif_stop_queue(netdev);
1342
1343		/*
 1344		 *	Reset RX so that the RX pointer is back on a 4X-aligned location
1345		 */
1346		velocity_rx_reset(vptr);
1347		mac_rx_queue_run(regs);
1348		mac_rx_queue_wake(regs);
1349
1350		mii_status = velocity_get_opt_media_mode(vptr);
1351		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1352			velocity_print_link_status(vptr);
1353			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1354				netif_wake_queue(netdev);
1355		}
1356
1357		enable_flow_control_ability(vptr);
1358
1359		mac_clear_isr(regs);
1360		writel(CR0_STOP, &regs->CR0Clr);
1361		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1362							&regs->CR0Set);
1363
1364		break;
1365
1366	case VELOCITY_INIT_COLD:
1367	default:
1368		/*
1369		 *	Do reset
1370		 */
1371		velocity_soft_reset(vptr);
1372		mdelay(5);
1373
1374		if (!vptr->no_eeprom) {
1375			mac_eeprom_reload(regs);
1376			for (i = 0; i < 6; i++)
1377				writeb(netdev->dev_addr[i], regs->PAR + i);
1378		}
1379
1380		/*
1381		 *	clear Pre_ACPI bit.
1382		 */
1383		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1384		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1385		mac_set_dma_length(regs, vptr->options.DMA_length);
1386
1387		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1388		/*
1389		 *	Back off algorithm use original IEEE standard
1390		 */
1391		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1392
1393		/*
1394		 *	Init CAM filter
1395		 */
1396		velocity_init_cam_filter(vptr);
1397
1398		/*
1399		 *	Set packet filter: Receive directed and broadcast address
1400		 */
1401		velocity_set_multi(netdev);
1402
1403		/*
1404		 *	Enable MII auto-polling
1405		 */
1406		enable_mii_autopoll(regs);
1407
1408		setup_adaptive_interrupts(vptr);
1409
1410		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1411		writew(vptr->options.numrx - 1, &regs->RDCSize);
1412		mac_rx_queue_run(regs);
1413		mac_rx_queue_wake(regs);
1414
1415		writew(vptr->options.numtx - 1, &regs->TDCSize);
1416
1417		for (i = 0; i < vptr->tx.numq; i++) {
1418			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1419			mac_tx_queue_run(regs, i);
1420		}
1421
1422		init_flow_control_register(vptr);
1423
1424		writel(CR0_STOP, &regs->CR0Clr);
1425		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1426
1427		mii_status = velocity_get_opt_media_mode(vptr);
1428		netif_stop_queue(netdev);
1429
1430		mii_init(vptr, mii_status);
1431
1432		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1433			velocity_print_link_status(vptr);
1434			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1435				netif_wake_queue(netdev);
1436		}
1437
1438		enable_flow_control_ability(vptr);
1439		mac_hw_mibs_init(regs);
1440		mac_write_int_mask(vptr->int_mask, regs);
1441		mac_clear_isr(regs);
1442
1443	}
1444}
1445
1446static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1447{
1448	struct mac_regs __iomem *regs = vptr->mac_regs;
1449	int avail, dirty, unusable;
1450
1451	/*
1452	 * RD number must be equal to 4X per hardware spec
1453	 * (programming guide rev 1.20, p.13)
1454	 */
1455	if (vptr->rx.filled < 4)
1456		return;
1457
1458	wmb();
1459
1460	unusable = vptr->rx.filled & 0x0003;
1461	dirty = vptr->rx.dirty - unusable;
1462	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1463		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1464		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1465	}
1466
1467	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1468	vptr->rx.filled = unusable;
1469}
1470
1471/**
1472 *	velocity_init_dma_rings	-	set up DMA rings
1473 *	@vptr: Velocity to set up
1474 *
1475 *	Allocate PCI mapped DMA rings for the receive and transmit layer
1476 *	to use.
1477 */
1478static int velocity_init_dma_rings(struct velocity_info *vptr)
1479{
1480	struct velocity_opt *opt = &vptr->options;
1481	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1482	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1483	dma_addr_t pool_dma;
1484	void *pool;
1485	unsigned int i;
1486
1487	/*
 1488	 * Allocate all RD/TD rings from a single pool.
1489	 *
1490	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
1491	 * alignment
1492	 */
1493	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1494				    rx_ring_size, &pool_dma, GFP_ATOMIC);
1495	if (!pool) {
1496		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1497			vptr->netdev->name);
1498		return -ENOMEM;
1499	}
1500
1501	vptr->rx.ring = pool;
1502	vptr->rx.pool_dma = pool_dma;
1503
1504	pool += rx_ring_size;
1505	pool_dma += rx_ring_size;
1506
1507	for (i = 0; i < vptr->tx.numq; i++) {
1508		vptr->tx.rings[i] = pool;
1509		vptr->tx.pool_dma[i] = pool_dma;
1510		pool += tx_ring_size;
1511		pool_dma += tx_ring_size;
1512	}
1513
1514	return 0;
1515}
1516
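     /* Pick the receive buffer size: the default packet buffer for a standard
        MTU, otherwise the MTU plus a little headroom. */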
1517static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1518{
1519	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1520}
1521
1522/**
1523 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
1524 *	@vptr: velocity
1525 *	@idx: ring index
1526 *
1527 *	Allocate a new full sized buffer for the reception of a frame and
1528 *	map it into PCI space for the hardware to use. The hardware
1529 *	requires *64* byte alignment of the buffer which makes life
1530 *	less fun than would be ideal.
1531 */
1532static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1533{
1534	struct rx_desc *rd = &(vptr->rx.ring[idx]);
1535	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1536
1537	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1538	if (rd_info->skb == NULL)
1539		return -ENOMEM;
1540
1541	/*
1542	 *	Do the gymnastics to get the buffer head for data at
1543	 *	64byte alignment.
1544	 */
1545	skb_reserve(rd_info->skb,
1546			64 - ((unsigned long) rd_info->skb->data & 63));
1547	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1548					vptr->rx.buf_sz, DMA_FROM_DEVICE);
1549
1550	/*
1551	 *	Fill in the descriptor to match
1552	 */
1553
1554	*((u32 *) & (rd->rdesc0)) = 0;
1555	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1556	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1557	rd->pa_high = 0;
1558	return 0;
1559}
1560
1561
1562static int velocity_rx_refill(struct velocity_info *vptr)
1563{
1564	int dirty = vptr->rx.dirty, done = 0;
1565
1566	do {
1567		struct rx_desc *rd = vptr->rx.ring + dirty;
1568
1569		/* Fine for an all zero Rx desc at init time as well */
1570		if (rd->rdesc0.len & OWNED_BY_NIC)
1571			break;
1572
1573		if (!vptr->rx.info[dirty].skb) {
1574			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1575				break;
1576		}
1577		done++;
1578		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1579	} while (dirty != vptr->rx.curr);
1580
1581	if (done) {
1582		vptr->rx.dirty = dirty;
1583		vptr->rx.filled += done;
1584	}
1585
1586	return done;
1587}
1588
1589/**
1590 *	velocity_free_rd_ring	-	free receive ring
1591 *	@vptr: velocity to clean up
1592 *
1593 *	Free the receive buffers for each ring slot and any
1594 *	attached socket buffers that need to go away.
1595 */
1596static void velocity_free_rd_ring(struct velocity_info *vptr)
1597{
1598	int i;
1599
1600	if (vptr->rx.info == NULL)
1601		return;
1602
1603	for (i = 0; i < vptr->options.numrx; i++) {
1604		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1605		struct rx_desc *rd = vptr->rx.ring + i;
1606
1607		memset(rd, 0, sizeof(*rd));
1608
1609		if (!rd_info->skb)
1610			continue;
1611		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1612				 DMA_FROM_DEVICE);
1613		rd_info->skb_dma = 0;
1614
1615		dev_kfree_skb(rd_info->skb);
1616		rd_info->skb = NULL;
1617	}
1618
1619	kfree(vptr->rx.info);
1620	vptr->rx.info = NULL;
1621}
1622
1623/**
1624 *	velocity_init_rd_ring	-	set up receive ring
1625 *	@vptr: velocity to configure
1626 *
1627 *	Allocate and set up the receive buffers for each ring slot and
1628 *	assign them to the network adapter.
1629 */
1630static int velocity_init_rd_ring(struct velocity_info *vptr)
1631{
1632	int ret = -ENOMEM;
1633
1634	vptr->rx.info = kcalloc(vptr->options.numrx,
1635				sizeof(struct velocity_rd_info), GFP_KERNEL);
1636	if (!vptr->rx.info)
1637		goto out;
1638
1639	velocity_init_rx_ring_indexes(vptr);
1640
1641	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1642		netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
1643		velocity_free_rd_ring(vptr);
1644		goto out;
1645	}
1646
1647	ret = 0;
1648out:
1649	return ret;
1650}
1651
1652/**
1653 *	velocity_init_td_ring	-	set up transmit ring
1654 *	@vptr:	velocity
1655 *
1656 *	Set up the transmit ring and chain the ring pointers together.
1657 *	Returns zero on success or a negative posix errno code for
1658 *	failure.
1659 */
1660static int velocity_init_td_ring(struct velocity_info *vptr)
1661{
1662	int j;
1663
1664	/* Init the TD ring entries */
1665	for (j = 0; j < vptr->tx.numq; j++) {
1666
1667		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1668					    sizeof(struct velocity_td_info),
1669					    GFP_KERNEL);
1670		if (!vptr->tx.infos[j])	{
1671			while (--j >= 0)
1672				kfree(vptr->tx.infos[j]);
1673			return -ENOMEM;
1674		}
1675
1676		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1677	}
1678	return 0;
1679}
1680
1681/**
1682 *	velocity_free_dma_rings	-	free PCI ring pointers
1683 *	@vptr: Velocity to free from
1684 *
1685 *	Clean up the PCI ring buffers allocated to this velocity.
1686 */
1687static void velocity_free_dma_rings(struct velocity_info *vptr)
1688{
1689	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1690		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1691
1692	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1693}
1694
1695static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1696{
1697	int ret;
1698
1699	velocity_set_rxbufsize(vptr, mtu);
1700
1701	ret = velocity_init_dma_rings(vptr);
1702	if (ret < 0)
1703		goto out;
1704
1705	ret = velocity_init_rd_ring(vptr);
1706	if (ret < 0)
1707		goto err_free_dma_rings_0;
1708
1709	ret = velocity_init_td_ring(vptr);
1710	if (ret < 0)
1711		goto err_free_rd_ring_1;
1712out:
1713	return ret;
1714
1715err_free_rd_ring_1:
1716	velocity_free_rd_ring(vptr);
1717err_free_dma_rings_0:
1718	velocity_free_dma_rings(vptr);
1719	goto out;
1720}
1721
1722/**
1723 *	velocity_free_tx_buf	-	free transmit buffer
1724 *	@vptr: velocity
1725 *	@tdinfo: buffer
1726 *	@td: transmit descriptor to free
1727 *
 1728 *	Release a transmit buffer. If the buffer was preallocated then
1729 *	recycle it, if not then unmap the buffer.
1730 */
1731static void velocity_free_tx_buf(struct velocity_info *vptr,
1732		struct velocity_td_info *tdinfo, struct tx_desc *td)
1733{
1734	struct sk_buff *skb = tdinfo->skb;
1735	int i;
1736
1737	/*
1738	 *	Don't unmap the pre-allocated tx_bufs
1739	 */
1740	for (i = 0; i < tdinfo->nskb_dma; i++) {
1741		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1742
1743		/* For scatter-gather */
1744		if (skb_shinfo(skb)->nr_frags > 0)
1745			pktlen = max_t(size_t, pktlen,
1746				       td->td_buf[i].size & ~TD_QUEUE);
1747
1748		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1749				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1750	}
1751	dev_consume_skb_irq(skb);
1752	tdinfo->skb = NULL;
1753}
1754
1755/*
1756 *	FIXME: could we merge this with velocity_free_tx_buf ?
1757 */
1758static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1759							 int q, int n)
1760{
1761	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1762	int i;
1763
1764	if (td_info == NULL)
1765		return;
1766
1767	if (td_info->skb) {
1768		for (i = 0; i < td_info->nskb_dma; i++) {
1769			if (td_info->skb_dma[i]) {
1770				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1771					td_info->skb->len, DMA_TO_DEVICE);
1772				td_info->skb_dma[i] = 0;
1773			}
1774		}
1775		dev_kfree_skb(td_info->skb);
1776		td_info->skb = NULL;
1777	}
1778}
1779
1780/**
1781 *	velocity_free_td_ring	-	free td ring
1782 *	@vptr: velocity
1783 *
1784 *	Free up the transmit ring for this particular velocity adapter.
1785 *	We free the ring contents but not the ring itself.
1786 */
1787static void velocity_free_td_ring(struct velocity_info *vptr)
1788{
1789	int i, j;
1790
1791	for (j = 0; j < vptr->tx.numq; j++) {
1792		if (vptr->tx.infos[j] == NULL)
1793			continue;
1794		for (i = 0; i < vptr->options.numtx; i++)
1795			velocity_free_td_ring_entry(vptr, j, i);
1796
1797		kfree(vptr->tx.infos[j]);
1798		vptr->tx.infos[j] = NULL;
1799	}
1800}
1801
1802static void velocity_free_rings(struct velocity_info *vptr)
1803{
1804	velocity_free_td_ring(vptr);
1805	velocity_free_rd_ring(vptr);
1806	velocity_free_dma_rings(vptr);
1807}
1808
1809/**
1810 *	velocity_error	-	handle error from controller
1811 *	@vptr: velocity
1812 *	@status: card status
1813 *
1814 *	Process an error report from the hardware and attempt to recover
1815 *	the card itself. At the moment we cannot recover from some
1816 *	theoretically impossible errors but this could be fixed using
1817 *	the pci_device_failed logic to bounce the hardware
1818 *
1819 */
1820static void velocity_error(struct velocity_info *vptr, int status)
1821{
1822
1823	if (status & ISR_TXSTLI) {
1824		struct mac_regs __iomem *regs = vptr->mac_regs;
1825
1826		netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
1827			   readw(&regs->TDIdx[0]));
1828		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1829		writew(TRDCSR_RUN, &regs->TDCSRClr);
1830		netif_stop_queue(vptr->netdev);
1831
1832		/* FIXME: port over the pci_device_failed code and use it
1833		   here */
1834	}
1835
1836	if (status & ISR_SRCI) {
1837		struct mac_regs __iomem *regs = vptr->mac_regs;
1838		int linked;
1839
1840		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1841			vptr->mii_status = check_connection_type(regs);
1842
1843			/*
1844			 *	If it is a 3119, disable frame bursting in
1845			 *	halfduplex mode and enable it in fullduplex
1846			 *	 mode
1847			 */
1848			if (vptr->rev_id < REV_ID_VT3216_A0) {
1849				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1850					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1851				else
1852					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1853			}
1854			/*
1855			 *	Only enable CD heart beat counter in 10HD mode
1856			 */
1857			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1858				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1859			else
1860				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1861
1862			setup_queue_timers(vptr);
1863		}
1864		/*
1865		 *	Get link status from PHYSR0
1866		 */
1867		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1868
1869		if (linked) {
1870			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1871			netif_carrier_on(vptr->netdev);
1872		} else {
1873			vptr->mii_status |= VELOCITY_LINK_FAIL;
1874			netif_carrier_off(vptr->netdev);
1875		}
1876
1877		velocity_print_link_status(vptr);
1878		enable_flow_control_ability(vptr);
1879
1880		/*
1881		 *	Re-enable auto-polling because SRCI will disable
1882		 *	auto-polling
1883		 */
1884
1885		enable_mii_autopoll(regs);
1886
1887		if (vptr->mii_status & VELOCITY_LINK_FAIL)
1888			netif_stop_queue(vptr->netdev);
1889		else
1890			netif_wake_queue(vptr->netdev);
1891
1892	}
1893	if (status & ISR_MIBFI)
1894		velocity_update_hw_mibs(vptr);
1895	if (status & ISR_LSTEI)
1896		mac_rx_queue_wake(vptr->mac_regs);
1897}
1898
1899/**
1900 *	velocity_tx_srv		-	transmit interrupt service
1901 *	@vptr: Velocity
1902 *
1903 *	Scan the queues looking for transmitted packets that
1904 *	we can complete and clean up. Update any statistics as
1905 *	necessary.
1906 */
1907static int velocity_tx_srv(struct velocity_info *vptr)
1908{
1909	struct tx_desc *td;
1910	int qnum;
1911	int full = 0;
1912	int idx;
1913	int works = 0;
1914	struct velocity_td_info *tdinfo;
1915	struct net_device_stats *stats = &vptr->netdev->stats;
1916
1917	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1918		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1919			idx = (idx + 1) % vptr->options.numtx) {
1920
1921			/*
1922			 *	Get Tx Descriptor
1923			 */
1924			td = &(vptr->tx.rings[qnum][idx]);
1925			tdinfo = &(vptr->tx.infos[qnum][idx]);
1926
1927			if (td->tdesc0.len & OWNED_BY_NIC)
1928				break;
1929
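			/* Cap the amount of completion work done in one call;
			   anything left over is cleaned up on the next pass */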
1930			if ((works++ > 15))
1931				break;
1932
1933			if (td->tdesc0.TSR & TSR0_TERR) {
1934				stats->tx_errors++;
1935				stats->tx_dropped++;
1936				if (td->tdesc0.TSR & TSR0_CDH)
1937					stats->tx_heartbeat_errors++;
1938				if (td->tdesc0.TSR & TSR0_CRS)
1939					stats->tx_carrier_errors++;
1940				if (td->tdesc0.TSR & TSR0_ABT)
1941					stats->tx_aborted_errors++;
1942				if (td->tdesc0.TSR & TSR0_OWC)
1943					stats->tx_window_errors++;
1944			} else {
1945				stats->tx_packets++;
1946				stats->tx_bytes += tdinfo->skb->len;
1947			}
1948			velocity_free_tx_buf(vptr, tdinfo, td);
1949			vptr->tx.used[qnum]--;
1950		}
1951		vptr->tx.tail[qnum] = idx;
1952
1953		if (AVAIL_TD(vptr, qnum) < 1)
1954			full = 1;
1955	}
1956	/*
1957	 *	Look to see if we should kick the transmit network
1958	 *	layer for more work.
1959	 */
1960	if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1961	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1962		netif_wake_queue(vptr->netdev);
1963	}
1964	return works;
1965}
1966
1967/**
1968 *	velocity_rx_csum	-	checksum process
1969 *	@rd: receive packet descriptor
1970 *	@skb: network layer packet buffer
1971 *
1972 *	Process the status bits for the received packet and determine
1973 *	if the checksum was computed and verified by the hardware
1974 */
1975static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1976{
1977	skb_checksum_none_assert(skb);
1978
1979	if (rd->rdesc1.CSM & CSM_IPKT) {
1980		if (rd->rdesc1.CSM & CSM_IPOK) {
1981			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1982					(rd->rdesc1.CSM & CSM_UDPKT)) {
1983				if (!(rd->rdesc1.CSM & CSM_TUPOK))
1984					return;
1985			}
1986			skb->ip_summed = CHECKSUM_UNNECESSARY;
1987		}
1988	}
1989}
1990
1991/**
1992 *	velocity_rx_copy	-	in place Rx copy for small packets
1993 *	@rx_skb: network layer packet buffer candidate
1994 *	@pkt_size: received data size
1995 *	@vptr: velocity adapter
1996 *
1997 *	Replace the current skb that is scheduled for Rx processing by a
1998 *	shorter, immediately allocated skb, if the received packet is small
1999 *	enough. This function returns a negative value if the received
2000 *	packet is too big or if memory is exhausted.
2001 */
2002static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2003			    struct velocity_info *vptr)
2004{
2005	int ret = -1;
2006	if (pkt_size < rx_copybreak) {
2007		struct sk_buff *new_skb;
2008
2009		new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2010		if (new_skb) {
2011			new_skb->ip_summed = rx_skb[0]->ip_summed;
2012			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2013			*rx_skb = new_skb;
2014			ret = 0;
2015		}
2016
2017	}
2018	return ret;
2019}
2020
2021/**
2022 *	velocity_iph_realign	-	IP header alignment
2023 *	@vptr: velocity we are handling
2024 *	@skb: network layer packet buffer
2025 *	@pkt_size: received data size
2026 *
2027 *	Align the IP header on a 2 byte boundary. This behavior can be
2028 *	configured by the user.
2029 */
2030static inline void velocity_iph_realign(struct velocity_info *vptr,
2031					struct sk_buff *skb, int pkt_size)
2032{
2033	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2034		memmove(skb->data + 2, skb->data, pkt_size);
2035		skb_reserve(skb, 2);
2036	}
2037}
2038
2039/**
2040 *	velocity_receive_frame	-	received packet processor
2041 *	@vptr: velocity we are handling
2042 *	@idx: ring index
2043 *
2044 *	A packet has arrived. We process the packet and if appropriate
2045 *	pass the frame up the network stack
2046 */
2047static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2048{
2049	struct net_device_stats *stats = &vptr->netdev->stats;
2050	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2051	struct rx_desc *rd = &(vptr->rx.ring[idx]);
2052	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2053	struct sk_buff *skb;
2054
2055	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2056		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2057			netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
2058		stats->rx_length_errors++;
2059		return -EINVAL;
2060	}
2061
2062	if (rd->rdesc0.RSR & RSR_MAR)
2063		stats->multicast++;
2064
2065	skb = rd_info->skb;
2066
2067	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2068				    vptr->rx.buf_sz, DMA_FROM_DEVICE);
2069
2070	velocity_rx_csum(rd, skb);
2071
2072	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2073		velocity_iph_realign(vptr, skb, pkt_len);
2074		rd_info->skb = NULL;
2075		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2076				 DMA_FROM_DEVICE);
2077	} else {
2078		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2079					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
2080	}
2081
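	/* The descriptor length includes the trailing 4 byte frame
	   checksum, so trim it before handing the skb to the stack */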
2082	skb_put(skb, pkt_len - 4);
2083	skb->protocol = eth_type_trans(skb, vptr->netdev);
2084
2085	if (rd->rdesc0.RSR & RSR_DETAG) {
2086		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2087
2088		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2089	}
2090	netif_receive_skb(skb);
2091
2092	stats->rx_bytes += pkt_len;
2093	stats->rx_packets++;
2094
2095	return 0;
2096}
2097
2098/**
2099 *	velocity_rx_srv		-	service RX interrupt
2100 *	@vptr: velocity
2101 *	@budget_left: remaining budget
2102 *
2103 *	Walk the receive ring of the velocity adapter and remove
2104 *	any received packets from the receive queue. Hand the ring
2105 *	slots back to the adapter for reuse.
2106 */
2107static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2108{
2109	struct net_device_stats *stats = &vptr->netdev->stats;
2110	int rd_curr = vptr->rx.curr;
2111	int works = 0;
2112
2113	while (works < budget_left) {
2114		struct rx_desc *rd = vptr->rx.ring + rd_curr;
2115
2116		if (!vptr->rx.info[rd_curr].skb)
2117			break;
2118
2119		if (rd->rdesc0.len & OWNED_BY_NIC)
2120			break;
2121
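		/* Only read the rest of the descriptor once the ownership
		   bit has been checked above */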
2122		rmb();
2123
2124		/*
2125		 *	Don't drop CE or RL error frames even though RXOK is off
2126		 */
2127		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2128			if (velocity_receive_frame(vptr, rd_curr) < 0)
2129				stats->rx_dropped++;
2130		} else {
2131			if (rd->rdesc0.RSR & RSR_CRC)
2132				stats->rx_crc_errors++;
2133			if (rd->rdesc0.RSR & RSR_FAE)
2134				stats->rx_frame_errors++;
2135
2136			stats->rx_dropped++;
2137		}
2138
2139		rd->size |= RX_INTEN;
2140
2141		rd_curr++;
2142		if (rd_curr >= vptr->options.numrx)
2143			rd_curr = 0;
2144		works++;
2145	}
2146
2147	vptr->rx.curr = rd_curr;
2148
2149	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2150		velocity_give_many_rx_descs(vptr);
2151
2152	VAR_USED(stats);
2153	return works;
2154}
2155
2156static int velocity_poll(struct napi_struct *napi, int budget)
2157{
2158	struct velocity_info *vptr = container_of(napi,
2159			struct velocity_info, napi);
2160	unsigned int rx_done;
2161	unsigned long flags;
2162
2163	/*
2164	 * Do rx and tx twice for performance (taken from the VIA
2165	 * out-of-tree driver).
2166	 */
2167	rx_done = velocity_rx_srv(vptr, budget);
2168	spin_lock_irqsave(&vptr->lock, flags);
2169	velocity_tx_srv(vptr);
2170	/* If budget not fully consumed, exit the polling mode */
2171	if (rx_done < budget) {
2172		napi_complete_done(napi, rx_done);
2173		mac_enable_int(vptr->mac_regs);
2174	}
2175	spin_unlock_irqrestore(&vptr->lock, flags);
2176
2177	return rx_done;
2178}
2179
2180/**
2181 *	velocity_intr		-	interrupt callback
2182 *	@irq: interrupt number
2183 *	@dev_instance: interrupting device
2184 *
2185 *	Called whenever an interrupt is generated by the velocity
2186 *	adapter IRQ line. We may not be the source of the interrupt
2187 *	and need to identify initially if we are, and if not exit as
2188 *	efficiently as possible.
2189 */
2190static irqreturn_t velocity_intr(int irq, void *dev_instance)
2191{
2192	struct net_device *dev = dev_instance;
2193	struct velocity_info *vptr = netdev_priv(dev);
2194	u32 isr_status;
2195
2196	spin_lock(&vptr->lock);
2197	isr_status = mac_read_isr(vptr->mac_regs);
2198
2199	/* Not us ? */
2200	if (isr_status == 0) {
2201		spin_unlock(&vptr->lock);
2202		return IRQ_NONE;
2203	}
2204
2205	/* Ack the interrupt */
2206	mac_write_isr(vptr->mac_regs, isr_status);
2207
2208	if (likely(napi_schedule_prep(&vptr->napi))) {
2209		mac_disable_int(vptr->mac_regs);
2210		__napi_schedule(&vptr->napi);
2211	}
2212
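	/* Bits beyond the plain Rx/Tx completion interrupts signal error
	   or link status events that are handled immediately */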
2213	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2214		velocity_error(vptr, isr_status);
2215
2216	spin_unlock(&vptr->lock);
2217
2218	return IRQ_HANDLED;
2219}
2220
2221/**
2222 *	velocity_open		-	interface activation callback
2223 *	@dev: network layer device to open
2224 *
2225 *	Called when the network layer brings the interface up. Returns
2226 *	a negative posix error code on failure, or zero on success.
2227 *
2228 *	All the ring allocation and set up is done on open for this
2229 *	adapter to minimise memory usage when inactive
2230 */
2231static int velocity_open(struct net_device *dev)
2232{
2233	struct velocity_info *vptr = netdev_priv(dev);
2234	int ret;
2235
2236	ret = velocity_init_rings(vptr, dev->mtu);
2237	if (ret < 0)
2238		goto out;
2239
2240	/* Ensure chip is running */
2241	velocity_set_power_state(vptr, PCI_D0);
2242
2243	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2244
2245	ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2246			  dev->name, dev);
2247	if (ret < 0) {
2248		/* Power down the chip */
2249		velocity_set_power_state(vptr, PCI_D3hot);
2250		velocity_free_rings(vptr);
2251		goto out;
2252	}
2253
2254	velocity_give_many_rx_descs(vptr);
2255
2256	mac_enable_int(vptr->mac_regs);
2257	netif_start_queue(dev);
2258	napi_enable(&vptr->napi);
2259	vptr->flags |= VELOCITY_FLAGS_OPENED;
2260out:
2261	return ret;
2262}
2263
2264/**
2265 *	velocity_shutdown	-	shut down the chip
2266 *	@vptr: velocity to deactivate
2267 *
2268 *	Shuts down the internal operations of the velocity and
2269 *	disables interrupts, autopolling, transmit and receive
2270 */
2271static void velocity_shutdown(struct velocity_info *vptr)
2272{
2273	struct mac_regs __iomem *regs = vptr->mac_regs;
2274	mac_disable_int(regs);
2275	writel(CR0_STOP, &regs->CR0Set);
2276	writew(0xFFFF, &regs->TDCSRClr);
2277	writeb(0xFF, &regs->RDCSRClr);
2278	safe_disable_mii_autopoll(regs);
2279	mac_clear_isr(regs);
2280}
2281
2282/**
2283 *	velocity_change_mtu	-	MTU change callback
2284 *	@dev: network device
2285 *	@new_mtu: desired MTU
2286 *
2287 *	Handle requests from the networking layer for MTU change on
2288 *	this interface. It gets called on a change by the network layer.
2289 *	Return zero for success or negative posix error code.
2290 */
2291static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2292{
2293	struct velocity_info *vptr = netdev_priv(dev);
2294	int ret = 0;
2295
2296	if (!netif_running(dev)) {
2297		dev->mtu = new_mtu;
2298		goto out_0;
2299	}
2300
2301	if (dev->mtu != new_mtu) {
2302		struct velocity_info *tmp_vptr;
2303		unsigned long flags;
2304		struct rx_info rx;
2305		struct tx_info tx;
2306
2307		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2308		if (!tmp_vptr) {
2309			ret = -ENOMEM;
2310			goto out_0;
2311		}
2312
2313		tmp_vptr->netdev = dev;
2314		tmp_vptr->pdev = vptr->pdev;
2315		tmp_vptr->dev = vptr->dev;
2316		tmp_vptr->options = vptr->options;
2317		tmp_vptr->tx.numq = vptr->tx.numq;
2318
2319		ret = velocity_init_rings(tmp_vptr, new_mtu);
2320		if (ret < 0)
2321			goto out_free_tmp_vptr_1;
2322
2323		napi_disable(&vptr->napi);
2324
2325		spin_lock_irqsave(&vptr->lock, flags);
2326
2327		netif_stop_queue(dev);
2328		velocity_shutdown(vptr);
2329
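		/* Swap the freshly sized rings in and park the old ones on
		   tmp_vptr so they can be freed once the lock is dropped */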
2330		rx = vptr->rx;
2331		tx = vptr->tx;
2332
2333		vptr->rx = tmp_vptr->rx;
2334		vptr->tx = tmp_vptr->tx;
2335
2336		tmp_vptr->rx = rx;
2337		tmp_vptr->tx = tx;
2338
2339		dev->mtu = new_mtu;
2340
2341		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2342
2343		velocity_give_many_rx_descs(vptr);
2344
2345		napi_enable(&vptr->napi);
2346
2347		mac_enable_int(vptr->mac_regs);
2348		netif_start_queue(dev);
2349
2350		spin_unlock_irqrestore(&vptr->lock, flags);
2351
2352		velocity_free_rings(tmp_vptr);
2353
2354out_free_tmp_vptr_1:
2355		kfree(tmp_vptr);
2356	}
2357out_0:
2358	return ret;
2359}
2360
2361#ifdef CONFIG_NET_POLL_CONTROLLER
2362/**
2363 *  velocity_poll_controller		-	Velocity Poll controller function
2364 *  @dev: network device
2365 *
2366 *
2367 *  Used by NETCONSOLE and other diagnostic tools to allow network I/O
2368 *  with interrupts disabled.
2369 */
2370static void velocity_poll_controller(struct net_device *dev)
2371{
2372	disable_irq(dev->irq);
2373	velocity_intr(dev->irq, dev);
2374	enable_irq(dev->irq);
2375}
2376#endif
2377
2378/**
2379 *	velocity_mii_ioctl		-	MII ioctl handler
2380 *	@dev: network device
2381 *	@ifr: the ifreq block for the ioctl
2382 *	@cmd: the command
2383 *
2384 *	Process MII requests made via ioctl from the network layer. These
2385 *	are used by tools like kudzu to interrogate the link state of the
2386 *	hardware
2387 */
2388static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2389{
2390	struct velocity_info *vptr = netdev_priv(dev);
2391	struct mac_regs __iomem *regs = vptr->mac_regs;
2392	unsigned long flags;
2393	struct mii_ioctl_data *miidata = if_mii(ifr);
2394	int err;
2395
2396	switch (cmd) {
2397	case SIOCGMIIPHY:
2398		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2399		break;
2400	case SIOCGMIIREG:
2401		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2402			return -ETIMEDOUT;
2403		break;
2404	case SIOCSMIIREG:
2405		spin_lock_irqsave(&vptr->lock, flags);
2406		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2407		spin_unlock_irqrestore(&vptr->lock, flags);
2408		check_connection_type(vptr->mac_regs);
2409		if (err)
2410			return err;
2411		break;
2412	default:
2413		return -EOPNOTSUPP;
2414	}
2415	return 0;
2416}
2417
2418/**
2419 *	velocity_ioctl		-	ioctl entry point
2420 *	@dev: network device
2421 *	@rq: interface request ioctl
2422 *	@cmd: command code
2423 *
2424 *	Called when the user issues an ioctl request to the network
2425 *	device in question. The velocity interface supports MII.
2426 */
2427static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2428{
2429	struct velocity_info *vptr = netdev_priv(dev);
2430	int ret;
2431
2432	/* If we are asked for information and the device is power
2433	   saving then we need to bring the device back up to talk to it */
2434
2435	if (!netif_running(dev))
2436		velocity_set_power_state(vptr, PCI_D0);
2437
2438	switch (cmd) {
2439	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
2440	case SIOCGMIIREG:	/* Read MII PHY register. */
2441	case SIOCSMIIREG:	/* Write to MII PHY register. */
2442		ret = velocity_mii_ioctl(dev, rq, cmd);
2443		break;
2444
2445	default:
2446		ret = -EOPNOTSUPP;
2447	}
2448	if (!netif_running(dev))
2449		velocity_set_power_state(vptr, PCI_D3hot);
2450
2451
2452	return ret;
2453}
2454
2455/**
2456 *	velocity_get_stats	-	statistics callback
2457 *	@dev: network device
2458 *
2459 *	Callback from the network layer to allow driver statistics
2460 *	to be resynchronized with hardware collected state. In the
2461 *	case of the velocity we need to pull the MIB counters from
2462 *	the hardware into the counters before letting the network
2463 *	layer display them.
2464 */
2465static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2466{
2467	struct velocity_info *vptr = netdev_priv(dev);
2468
2469	/* If the hardware is down, don't touch MII */
2470	if (!netif_running(dev))
2471		return &dev->stats;
2472
2473	spin_lock_irq(&vptr->lock);
2474	velocity_update_hw_mibs(vptr);
2475	spin_unlock_irq(&vptr->lock);
2476
2477	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2478	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2479	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2480
2481//  unsigned long   rx_dropped;     /* no space in linux buffers    */
2482	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2483	/* detailed rx_errors: */
2484//  unsigned long   rx_length_errors;
2485//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2486	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2487//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2488//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2489//  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2490
2491	/* detailed tx_errors */
2492//  unsigned long   tx_fifo_errors;
2493
2494	return &dev->stats;
2495}
2496
2497/**
2498 *	velocity_close		-	close adapter callback
2499 *	@dev: network device
2500 *
2501 *	Callback from the network layer when the velocity is being
2502 *	deactivated by the network layer
2503 */
2504static int velocity_close(struct net_device *dev)
2505{
2506	struct velocity_info *vptr = netdev_priv(dev);
2507
2508	napi_disable(&vptr->napi);
2509	netif_stop_queue(dev);
2510	velocity_shutdown(vptr);
2511
2512	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2513		velocity_get_ip(vptr);
2514
2515	free_irq(dev->irq, dev);
2516
2517	velocity_free_rings(vptr);
2518
2519	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2520	return 0;
2521}
2522
2523/**
2524 *	velocity_xmit		-	transmit packet callback
2525 *	@skb: buffer to transmit
2526 *	@dev: network device
2527 *
2528 *	Called by the network layer to request that a packet be queued to
2529 *	the velocity. Returns zero on success.
2530 */
2531static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2532				 struct net_device *dev)
2533{
2534	struct velocity_info *vptr = netdev_priv(dev);
2535	int qnum = 0;
2536	struct tx_desc *td_ptr;
2537	struct velocity_td_info *tdinfo;
2538	unsigned long flags;
2539	int pktlen;
2540	int index, prev;
2541	int i = 0;
2542
2543	if (skb_padto(skb, ETH_ZLEN))
2544		goto out;
2545
2546	/* The hardware can handle at most 7 memory segments, so merge
2547	 * the skb if there are more */
2548	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2549		dev_kfree_skb_any(skb);
2550		return NETDEV_TX_OK;
2551	}
2552
2553	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2554			max_t(unsigned int, skb->len, ETH_ZLEN) :
2555				skb_headlen(skb);
2556
2557	spin_lock_irqsave(&vptr->lock, flags);
2558
2559	index = vptr->tx.curr[qnum];
2560	td_ptr = &(vptr->tx.rings[qnum][index]);
2561	tdinfo = &(vptr->tx.infos[qnum][index]);
2562
2563	td_ptr->tdesc1.TCR = TCR0_TIC;
2564	td_ptr->td_buf[0].size &= ~TD_QUEUE;
2565
2566	/*
2567	 *	Map the linear network buffer into PCI space and
2568	 *	add it to the transmit ring.
2569	 */
2570	tdinfo->skb = skb;
2571	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2572								DMA_TO_DEVICE);
2573	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2574	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2575	td_ptr->td_buf[0].pa_high = 0;
2576	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2577
2578	/* Handle fragments */
2579	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2580		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2581
2582		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2583							  frag, 0,
2584							  skb_frag_size(frag),
2585							  DMA_TO_DEVICE);
2586
2587		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2588		td_ptr->td_buf[i + 1].pa_high = 0;
2589		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2590	}
2591	tdinfo->nskb_dma = i + 1;
2592
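	/* The segment count (nskb_dma + 1) is shifted into the high bits of
	   the command field (multiplying by 16 == shifting left by 4) */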
2593	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2594
2595	if (skb_vlan_tag_present(skb)) {
2596		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2597		td_ptr->tdesc1.TCR |= TCR0_VETAG;
2598	}
2599
2600	/*
2601	 *	Handle hardware checksum
2602	 */
2603	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2604		const struct iphdr *ip = ip_hdr(skb);
2605		if (ip->protocol == IPPROTO_TCP)
2606			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2607		else if (ip->protocol == IPPROTO_UDP)
2608			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2609		td_ptr->tdesc1.TCR |= TCR0_IPCK;
2610	}
2611
2612	prev = index - 1;
2613	if (prev < 0)
2614		prev = vptr->options.numtx - 1;
2615	td_ptr->tdesc0.len |= OWNED_BY_NIC;
2616	vptr->tx.used[qnum]++;
2617	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2618
2619	if (AVAIL_TD(vptr, qnum) < 1)
2620		netif_stop_queue(dev);
2621
2622	td_ptr = &(vptr->tx.rings[qnum][prev]);
2623	td_ptr->td_buf[0].size |= TD_QUEUE;
2624	mac_tx_queue_wake(vptr->mac_regs, qnum);
2625
2626	spin_unlock_irqrestore(&vptr->lock, flags);
2627out:
2628	return NETDEV_TX_OK;
2629}
2630
2631static const struct net_device_ops velocity_netdev_ops = {
2632	.ndo_open		= velocity_open,
2633	.ndo_stop		= velocity_close,
2634	.ndo_start_xmit		= velocity_xmit,
2635	.ndo_get_stats		= velocity_get_stats,
2636	.ndo_validate_addr	= eth_validate_addr,
2637	.ndo_set_mac_address	= eth_mac_addr,
2638	.ndo_set_rx_mode	= velocity_set_multi,
2639	.ndo_change_mtu		= velocity_change_mtu,
2640	.ndo_eth_ioctl		= velocity_ioctl,
2641	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
2642	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
2643#ifdef CONFIG_NET_POLL_CONTROLLER
2644	.ndo_poll_controller = velocity_poll_controller,
2645#endif
2646};
2647
2648/**
2649 *	velocity_init_info	-	init private data
2650 *	@vptr: Velocity info
2651 *	@info: Board type
2652 *
2653 *	Set up the initial velocity_info struct for the device that has been
2654 *	discovered.
2655 */
2656static void velocity_init_info(struct velocity_info *vptr,
2657				const struct velocity_info_tbl *info)
2658{
2659	vptr->chip_id = info->chip_id;
2660	vptr->tx.numq = info->txqueue;
2661	vptr->multicast_limit = MCAM_SIZE;
2662	spin_lock_init(&vptr->lock);
2663}
2664
2665/**
2666 *	velocity_get_pci_info	-	retrieve PCI info for device
2667 *	@vptr: velocity device
2668 *
2669 *	Retrieve the PCI configuration space data that interests us from
2670 *	the kernel PCI layer
2671 */
2672static int velocity_get_pci_info(struct velocity_info *vptr)
2673{
2674	struct pci_dev *pdev = vptr->pdev;
2675
2676	pci_set_master(pdev);
2677
2678	vptr->ioaddr = pci_resource_start(pdev, 0);
2679	vptr->memaddr = pci_resource_start(pdev, 1);
2680
2681	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2682		dev_err(&pdev->dev,
2683			   "region #0 is not an I/O resource, aborting.\n");
2684		return -EINVAL;
2685	}
2686
2687	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2688		dev_err(&pdev->dev,
2689			   "region #1 is an I/O resource, aborting.\n");
2690		return -EINVAL;
2691	}
2692
2693	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2694		dev_err(&pdev->dev, "region #1 is too small.\n");
2695		return -EINVAL;
2696	}
2697
2698	return 0;
2699}
2700
2701/**
2702 *	velocity_get_platform_info - retrieve platform info for device
2703 *	@vptr: velocity device
2704 *
2705 *	Retrieve the Platform configuration data that interests us
2706 */
2707static int velocity_get_platform_info(struct velocity_info *vptr)
2708{
2709	struct resource res;
2710	int ret;
2711
2712	vptr->no_eeprom = of_property_read_bool(vptr->dev->of_node, "no-eeprom");
2713
2714	ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2715	if (ret) {
2716		dev_err(vptr->dev, "unable to find memory address\n");
2717		return ret;
2718	}
2719
2720	vptr->memaddr = res.start;
2721
2722	if (resource_size(&res) < VELOCITY_IO_SIZE) {
2723		dev_err(vptr->dev, "memory region is too small.\n");
2724		return -EINVAL;
2725	}
2726
2727	return 0;
2728}
2729
2730/**
2731 *	velocity_print_info	-	per driver data
2732 *	@vptr: velocity
2733 *
2734 *	Print per driver data as the kernel driver finds Velocity
2735 *	hardware
2736 */
2737static void velocity_print_info(struct velocity_info *vptr)
2738{
2739	netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
2740		    get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
2741}
2742
2743static u32 velocity_get_link(struct net_device *dev)
2744{
2745	struct velocity_info *vptr = netdev_priv(dev);
2746	struct mac_regs __iomem *regs = vptr->mac_regs;
2747	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2748}
2749
2750/**
2751 *	velocity_probe - set up discovered velocity device
2752 *	@dev: device (PCI or platform)
2753 *	@irq: interrupt number assigned to the adapter
2754 *	@info: chip table entry matched for this adapter
2755 *	@bustype: bus that device is connected to
2756 *
2757 *	Configure a discovered adapter from scratch. Return a negative
2758 *	errno error code on failure paths.
2759 */
2760static int velocity_probe(struct device *dev, int irq,
2761			   const struct velocity_info_tbl *info,
2762			   enum velocity_bus_type bustype)
2763{
2764	struct net_device *netdev;
2765	int i;
2766	struct velocity_info *vptr;
2767	struct mac_regs __iomem *regs;
2768	int ret = -ENOMEM;
2769	u8 addr[ETH_ALEN];
2770
2771	/* FIXME: this driver, like almost all other ethernet drivers,
2772	 * can support more than MAX_UNITS.
2773	 */
2774	if (velocity_nics >= MAX_UNITS) {
2775		dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2776		return -ENODEV;
2777	}
2778
2779	netdev = alloc_etherdev(sizeof(struct velocity_info));
2780	if (!netdev)
2781		goto out;
2782
2783	/* Chain it all together */
2784
2785	SET_NETDEV_DEV(netdev, dev);
2786	vptr = netdev_priv(netdev);
2787
2788	pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2789	pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2790	pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
2791
2792	netdev->irq = irq;
2793	vptr->netdev = netdev;
2794	vptr->dev = dev;
2795
2796	velocity_init_info(vptr, info);
2797
2798	if (bustype == BUS_PCI) {
2799		vptr->pdev = to_pci_dev(dev);
2800
2801		ret = velocity_get_pci_info(vptr);
2802		if (ret < 0)
2803			goto err_free_dev;
2804	} else {
2805		vptr->pdev = NULL;
2806		ret = velocity_get_platform_info(vptr);
2807		if (ret < 0)
2808			goto err_free_dev;
2809	}
2810
2811	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2812	if (regs == NULL) {
2813		ret = -EIO;
2814		goto err_free_dev;
2815	}
2816
2817	vptr->mac_regs = regs;
2818	vptr->rev_id = readb(&regs->rev_id);
2819
2820	mac_wol_reset(regs);
2821
2822	for (i = 0; i < 6; i++)
2823		addr[i] = readb(&regs->PAR[i]);
2824	eth_hw_addr_set(netdev, addr);
2825
2826
2827	velocity_get_options(&vptr->options, velocity_nics);
2828
2829	/*
2830	 *	Mask out the options that cannot be set on the chip
2831	 */
2832
2833	vptr->options.flags &= info->flags;
2834
2835	/*
2836	 *	Enable the chip specific capabilities
2837	 */
2838
2839	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2840
2841	vptr->wol_opts = vptr->options.wol_opts;
2842	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2843
2844	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2845
2846	netdev->netdev_ops = &velocity_netdev_ops;
2847	netdev->ethtool_ops = &velocity_ethtool_ops;
2848	netif_napi_add(netdev, &vptr->napi, velocity_poll);
2849
2850	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2851			   NETIF_F_HW_VLAN_CTAG_TX;
2852	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2853			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2854			NETIF_F_IP_CSUM;
2855
2856	/* MTU range: 64 - 9000 */
2857	netdev->min_mtu = VELOCITY_MIN_MTU;
2858	netdev->max_mtu = VELOCITY_MAX_MTU;
2859
2860	ret = register_netdev(netdev);
2861	if (ret < 0)
2862		goto err_iounmap;
2863
2864	if (!velocity_get_link(netdev)) {
2865		netif_carrier_off(netdev);
2866		vptr->mii_status |= VELOCITY_LINK_FAIL;
2867	}
2868
2869	velocity_print_info(vptr);
2870	dev_set_drvdata(vptr->dev, netdev);
2871
2872	/* and leave the chip powered down */
2873
2874	velocity_set_power_state(vptr, PCI_D3hot);
2875	velocity_nics++;
2876out:
2877	return ret;
2878
2879err_iounmap:
2880	netif_napi_del(&vptr->napi);
2881	iounmap(regs);
2882err_free_dev:
2883	free_netdev(netdev);
2884	goto out;
2885}
2886
2887/**
2888 *	velocity_remove	- device unplug
2889 *	@dev: device being removed
2890 *
2891 *	Device unload callback. Called on an unplug or on module
2892 *	unload for each active device that is present. Disconnects
2893 *	the device from the network layer and frees all the resources
2894 */
2895static int velocity_remove(struct device *dev)
2896{
2897	struct net_device *netdev = dev_get_drvdata(dev);
2898	struct velocity_info *vptr = netdev_priv(netdev);
2899
2900	unregister_netdev(netdev);
2901	netif_napi_del(&vptr->napi);
2902	iounmap(vptr->mac_regs);
2903	free_netdev(netdev);
2904	velocity_nics--;
2905
2906	return 0;
2907}
2908
2909static int velocity_pci_probe(struct pci_dev *pdev,
2910			       const struct pci_device_id *ent)
2911{
2912	const struct velocity_info_tbl *info =
2913					&chip_info_table[ent->driver_data];
2914	int ret;
2915
2916	ret = pci_enable_device(pdev);
2917	if (ret < 0)
2918		return ret;
2919
2920	ret = pci_request_regions(pdev, VELOCITY_NAME);
2921	if (ret < 0) {
2922		dev_err(&pdev->dev, "No PCI resources.\n");
2923		goto fail1;
2924	}
2925
2926	ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2927	if (ret == 0)
2928		return 0;
2929
2930	pci_release_regions(pdev);
2931fail1:
2932	pci_disable_device(pdev);
2933	return ret;
2934}
2935
2936static void velocity_pci_remove(struct pci_dev *pdev)
2937{
2938	velocity_remove(&pdev->dev);
2939
2940	pci_release_regions(pdev);
2941	pci_disable_device(pdev);
2942}
2943
2944static int velocity_platform_probe(struct platform_device *pdev)
2945{
2946	const struct velocity_info_tbl *info;
2947	int irq;
2948
2949	info = of_device_get_match_data(&pdev->dev);
2950	if (!info)
2951		return -EINVAL;
2952
2953	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2954	if (!irq)
2955		return -EINVAL;
2956
2957	return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2958}
2959
2960static void velocity_platform_remove(struct platform_device *pdev)
2961{
2962	velocity_remove(&pdev->dev);
2963}
2964
2965#ifdef CONFIG_PM_SLEEP
2966/**
2967 *	wol_calc_crc		-	WOL CRC
2968 *	@size: size of the wake mask
2969 *	@pattern: data pattern
2970 *	@mask_pattern: mask
2971 *
2972 *	Compute the wake on lan crc hashes for the packet header
2973 *	we are interested in.
2974 */
2975static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2976{
2977	u16 crc = 0xFFFF;
2978	u8 mask;
2979	int i, j;
2980
2981	for (i = 0; i < size; i++) {
2982		mask = mask_pattern[i];
2983
2984		/* Skip this byte if its mask is zero */
2985		if (mask == 0x00)
2986			continue;
2987
2988		for (j = 0; j < 8; j++) {
2989			if ((mask & 0x01) == 0) {
2990				mask >>= 1;
2991				continue;
2992			}
2993			mask >>= 1;
2994			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2995		}
2996	}
2997	/*	Finally, invert the result once to get the correct data */
2998	crc = ~crc;
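	/* bitrev32() reverses all 32 bits, which leaves the 16 bit CRC in
	   the upper half, so shift it back down */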
2999	return bitrev32(crc) >> 16;
3000}
3001
3002/**
3003 *	velocity_set_wol	-	set up for wake on lan
3004 *	@vptr: velocity to set WOL status on
3005 *
3006 *	Set a card up for wake on lan either by unicast or by
3007 *	ARP packet.
3008 *
3009 *	FIXME: check static buffer is safe here
3010 */
3011static int velocity_set_wol(struct velocity_info *vptr)
3012{
3013	struct mac_regs __iomem *regs = vptr->mac_regs;
3014	enum speed_opt spd_dpx = vptr->options.spd_dpx;
3015	static u8 buf[256];
3016	int i;
3017
3018	static u32 mask_pattern[2][4] = {
3019		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
3020		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
3021	};
3022
3023	writew(0xFFFF, &regs->WOLCRClr);
3024	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3025	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3026
3027	/*
3028	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3029	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3030	 */
3031
3032	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3033		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3034
3035	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3036		struct arp_packet *arp = (struct arp_packet *) buf;
3037		u16 crc;
3038		memset(buf, 0, sizeof(struct arp_packet) + 7);
3039
3040		for (i = 0; i < 4; i++)
3041			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3042
3043		arp->type = htons(ETH_P_ARP);
3044		arp->ar_op = htons(1);
3045
3046		memcpy(arp->ar_tip, vptr->ip_addr, 4);
3047
3048		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3049				(u8 *) & mask_pattern[0][0]);
3050
3051		writew(crc, &regs->PatternCRC[0]);
3052		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3053	}
3054
3055	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3056	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3057
3058	writew(0x0FFF, &regs->WOLSRClr);
3059
3060	if (spd_dpx == SPD_DPX_1000_FULL)
3061		goto mac_done;
3062
3063	if (spd_dpx != SPD_DPX_AUTO)
3064		goto advertise_done;
3065
3066	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3067		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3068			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3069
3070		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3071	}
3072
3073	if (vptr->mii_status & VELOCITY_SPEED_1000)
3074		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3075
3076advertise_done:
3077	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3078
3079	{
3080		u8 GCR;
3081		GCR = readb(&regs->CHIPGCR);
3082		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3083		writeb(GCR, &regs->CHIPGCR);
3084	}
3085
3086mac_done:
3087	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3088	/* Turn on SWPTAG just before entering power mode */
3089	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3090	/* Go to bed ..... */
3091	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3092
3093	return 0;
3094}
3095
3096/**
3097 *	velocity_save_context	-	save registers
3098 *	@vptr: velocity
3099 *	@context: buffer for stored context
3100 *
3101 *	Retrieve the current configuration from the velocity hardware
3102 *	and stash it in the context structure, for use by the context
3103 *	restore functions. This allows us to save things we need across
3104 *	power down states
3105 */
3106static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3107{
3108	struct mac_regs __iomem *regs = vptr->mac_regs;
3109	u16 i;
3110	u8 __iomem *ptr = (u8 __iomem *)regs;
3111
3112	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3113		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3114
3115	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3116		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3117
3118	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3119		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3120
3121}
3122
3123static int velocity_suspend(struct device *dev)
3124{
3125	struct net_device *netdev = dev_get_drvdata(dev);
3126	struct velocity_info *vptr = netdev_priv(netdev);
3127	unsigned long flags;
3128
3129	if (!netif_running(vptr->netdev))
3130		return 0;
3131
3132	netif_device_detach(vptr->netdev);
3133
3134	spin_lock_irqsave(&vptr->lock, flags);
3135	if (vptr->pdev)
3136		pci_save_state(vptr->pdev);
3137
3138	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3139		velocity_get_ip(vptr);
3140		velocity_save_context(vptr, &vptr->context);
3141		velocity_shutdown(vptr);
3142		velocity_set_wol(vptr);
3143		if (vptr->pdev)
3144			pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3145		velocity_set_power_state(vptr, PCI_D3hot);
3146	} else {
3147		velocity_save_context(vptr, &vptr->context);
3148		velocity_shutdown(vptr);
3149		if (vptr->pdev)
3150			pci_disable_device(vptr->pdev);
3151		velocity_set_power_state(vptr, PCI_D3hot);
3152	}
3153
3154	spin_unlock_irqrestore(&vptr->lock, flags);
3155	return 0;
3156}
3157
3158/**
3159 *	velocity_restore_context	-	restore registers
3160 *	@vptr: velocity
3161 *	@context: buffer for stored context
3162 *
3163 *	Reload the register configuration from the velocity context
3164 *	created by velocity_save_context.
3165 */
3166static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3167{
3168	struct mac_regs __iomem *regs = vptr->mac_regs;
3169	int i;
3170	u8 __iomem *ptr = (u8 __iomem *)regs;
3171
3172	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3173		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3174
3175	/* Just skip cr0 */
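	/* CR1-CR3 are set/clear register pairs with the clear register
	   4 bytes above the set register: clear the unwanted bits first,
	   then write the saved value into the set register */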
3176	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3177		/* Clear */
3178		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3179		/* Set */
3180		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3181	}
3182
3183	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3184		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3185
3186	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3187		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3188
3189	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3190		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3191}
3192
3193static int velocity_resume(struct device *dev)
3194{
3195	struct net_device *netdev = dev_get_drvdata(dev);
3196	struct velocity_info *vptr = netdev_priv(netdev);
3197	unsigned long flags;
3198	int i;
3199
3200	if (!netif_running(vptr->netdev))
3201		return 0;
3202
3203	velocity_set_power_state(vptr, PCI_D0);
3204
3205	if (vptr->pdev) {
3206		pci_enable_wake(vptr->pdev, PCI_D0, 0);
3207		pci_restore_state(vptr->pdev);
3208	}
3209
3210	mac_wol_reset(vptr->mac_regs);
3211
3212	spin_lock_irqsave(&vptr->lock, flags);
3213	velocity_restore_context(vptr, &vptr->context);
3214	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3215	mac_disable_int(vptr->mac_regs);
3216
3217	velocity_tx_srv(vptr);
3218
3219	for (i = 0; i < vptr->tx.numq; i++) {
3220		if (vptr->tx.used[i])
3221			mac_tx_queue_wake(vptr->mac_regs, i);
3222	}
3223
3224	mac_enable_int(vptr->mac_regs);
3225	spin_unlock_irqrestore(&vptr->lock, flags);
3226	netif_device_attach(vptr->netdev);
3227
3228	return 0;
3229}
3230#endif	/* CONFIG_PM_SLEEP */
3231
3232static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3233
3234/*
3235 *	Definition for our device driver. The PCI layer interface
3236 *	uses this to handle all our card discover and plugging
3237 */
3238static struct pci_driver velocity_pci_driver = {
3239	.name		= VELOCITY_NAME,
3240	.id_table	= velocity_pci_id_table,
3241	.probe		= velocity_pci_probe,
3242	.remove		= velocity_pci_remove,
3243	.driver = {
3244		.pm = &velocity_pm_ops,
3245	},
3246};
3247
3248static struct platform_driver velocity_platform_driver = {
3249	.probe		= velocity_platform_probe,
3250	.remove_new	= velocity_platform_remove,
3251	.driver = {
3252		.name = "via-velocity",
3253		.of_match_table = velocity_of_ids,
3254		.pm = &velocity_pm_ops,
3255	},
3256};
3257
3258/**
3259 *	velocity_ethtool_up	-	pre hook for ethtool
3260 *	@dev: network device
3261 *
3262 *	Called before an ethtool operation. We need to make sure the
3263 *	chip is out of D3 state before we poke at it. In case of ethtool
3264 *	ops nesting, only wake the device up in the outermost block.
3265 */
3266static int velocity_ethtool_up(struct net_device *dev)
3267{
3268	struct velocity_info *vptr = netdev_priv(dev);
3269
3270	if (vptr->ethtool_ops_nesting == U32_MAX)
3271		return -EBUSY;
3272	if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
3273		velocity_set_power_state(vptr, PCI_D0);
3274	return 0;
3275}
3276
3277/**
3278 *	velocity_ethtool_down	-	post hook for ethtool
3279 *	@dev: network device
3280 *
3281 *	Called after an ethtool operation. Restore the chip back to D3
3282 *	state if it isn't running. In case of ethtool ops nesting, only
3283 *	put the device to sleep in the outermost block.
3284 */
3285static void velocity_ethtool_down(struct net_device *dev)
3286{
3287	struct velocity_info *vptr = netdev_priv(dev);
3288
3289	if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
3290		velocity_set_power_state(vptr, PCI_D3hot);
3291}
3292
3293static int velocity_get_link_ksettings(struct net_device *dev,
3294				       struct ethtool_link_ksettings *cmd)
3295{
3296	struct velocity_info *vptr = netdev_priv(dev);
3297	struct mac_regs __iomem *regs = vptr->mac_regs;
3298	u32 status;
3299	u32 supported, advertising;
3300
3301	status = check_connection_type(vptr->mac_regs);
3302
3303	supported = SUPPORTED_TP |
3304			SUPPORTED_Autoneg |
3305			SUPPORTED_10baseT_Half |
3306			SUPPORTED_10baseT_Full |
3307			SUPPORTED_100baseT_Half |
3308			SUPPORTED_100baseT_Full |
3309			SUPPORTED_1000baseT_Half |
3310			SUPPORTED_1000baseT_Full;
3311
3312	advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3313	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3314		advertising |=
3315			ADVERTISED_10baseT_Half |
3316			ADVERTISED_10baseT_Full |
3317			ADVERTISED_100baseT_Half |
3318			ADVERTISED_100baseT_Full |
3319			ADVERTISED_1000baseT_Half |
3320			ADVERTISED_1000baseT_Full;
3321	} else {
3322		switch (vptr->options.spd_dpx) {
3323		case SPD_DPX_1000_FULL:
3324			advertising |= ADVERTISED_1000baseT_Full;
3325			break;
3326		case SPD_DPX_100_HALF:
3327			advertising |= ADVERTISED_100baseT_Half;
3328			break;
3329		case SPD_DPX_100_FULL:
3330			advertising |= ADVERTISED_100baseT_Full;
3331			break;
3332		case SPD_DPX_10_HALF:
3333			advertising |= ADVERTISED_10baseT_Half;
3334			break;
3335		case SPD_DPX_10_FULL:
3336			advertising |= ADVERTISED_10baseT_Full;
3337			break;
3338		default:
3339			break;
3340		}
3341	}
3342
3343	if (status & VELOCITY_SPEED_1000)
3344		cmd->base.speed = SPEED_1000;
3345	else if (status & VELOCITY_SPEED_100)
3346		cmd->base.speed = SPEED_100;
3347	else
3348		cmd->base.speed = SPEED_10;
3349
3350	cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3351		AUTONEG_ENABLE : AUTONEG_DISABLE;
3352	cmd->base.port = PORT_TP;
3353	cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3354
3355	if (status & VELOCITY_DUPLEX_FULL)
3356		cmd->base.duplex = DUPLEX_FULL;
3357	else
3358		cmd->base.duplex = DUPLEX_HALF;
3359
3360	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3361						supported);
3362	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3363						advertising);
3364
3365	return 0;
3366}
3367
3368static int velocity_set_link_ksettings(struct net_device *dev,
3369				       const struct ethtool_link_ksettings *cmd)
3370{
3371	struct velocity_info *vptr = netdev_priv(dev);
3372	u32 speed = cmd->base.speed;
3373	u32 curr_status;
3374	u32 new_status = 0;
3375	int ret = 0;
3376
3377	curr_status = check_connection_type(vptr->mac_regs);
3378	curr_status &= (~VELOCITY_LINK_FAIL);
3379
3380	new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3381	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3382	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3383	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3384	new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3385		       VELOCITY_DUPLEX_FULL : 0);
3386
3387	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3388	    (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3389		ret = -EINVAL;
3390	} else {
3391		enum speed_opt spd_dpx;
3392
3393		if (new_status & VELOCITY_AUTONEG_ENABLE)
3394			spd_dpx = SPD_DPX_AUTO;
3395		else if ((new_status & VELOCITY_SPEED_1000) &&
3396			 (new_status & VELOCITY_DUPLEX_FULL)) {
3397			spd_dpx = SPD_DPX_1000_FULL;
3398		} else if (new_status & VELOCITY_SPEED_100)
3399			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3400				SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3401		else if (new_status & VELOCITY_SPEED_10)
3402			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3403				SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3404		else
3405			return -EOPNOTSUPP;
3406
3407		vptr->options.spd_dpx = spd_dpx;
3408
3409		velocity_set_media_mode(vptr, new_status);
3410	}
3411
3412	return ret;
3413}
3414
3415static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3416{
3417	struct velocity_info *vptr = netdev_priv(dev);
3418
3419	strscpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3420	strscpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3421	if (vptr->pdev)
3422		strscpy(info->bus_info, pci_name(vptr->pdev),
3423						sizeof(info->bus_info));
3424	else
3425		strscpy(info->bus_info, "platform", sizeof(info->bus_info));
3426}
3427
3428static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3429{
3430	struct velocity_info *vptr = netdev_priv(dev);
3431	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3432	wol->wolopts |= WAKE_MAGIC;
3433	/*
3434	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3435		   wol.wolopts|=WAKE_PHY;
3436			 */
3437	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3438		wol->wolopts |= WAKE_UCAST;
3439	if (vptr->wol_opts & VELOCITY_WOL_ARP)
3440		wol->wolopts |= WAKE_ARP;
3441	memcpy(&wol->sopass, vptr->wol_passwd, 6);
3442}
3443
3444static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3445{
3446	struct velocity_info *vptr = netdev_priv(dev);
3447
3448	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3449		return -EFAULT;
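	/* Magic packet wake is always left enabled; the other wake
	   methods are added on top of it below */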
3450	vptr->wol_opts = VELOCITY_WOL_MAGIC;
3451
3452	/*
3453	   if (wol.wolopts & WAKE_PHY) {
3454	   vptr->wol_opts|=VELOCITY_WOL_PHY;
3455	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3456	   }
3457	 */
3458
3459	if (wol->wolopts & WAKE_MAGIC) {
3460		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3461		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3462	}
3463	if (wol->wolopts & WAKE_UCAST) {
3464		vptr->wol_opts |= VELOCITY_WOL_UCAST;
3465		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3466	}
3467	if (wol->wolopts & WAKE_ARP) {
3468		vptr->wol_opts |= VELOCITY_WOL_ARP;
3469		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3470	}
3471	memcpy(vptr->wol_passwd, wol->sopass, 6);
3472	return 0;
3473}
3474
3475static int get_pending_timer_val(int val)
3476{
3477	int mult_bits = val >> 6;
3478	int mult = 1;
3479
3480	switch (mult_bits)
3481	{
3482	case 1:
3483		mult = 4; break;
3484	case 2:
3485		mult = 16; break;
3486	case 3:
3487		mult = 64; break;
3488	case 0:
3489	default:
3490		break;
3491	}
3492
3493	return (val & 0x3f) * mult;
3494}
3495
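/* Inverse of get_pending_timer_val(): the low 6 bits hold the count and
 * bits 6-7 select a x1/x4/x16/x64 multiplier.
 */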
3496static void set_pending_timer_val(int *val, u32 us)
3497{
3498	u8 mult = 0;
3499	u8 shift = 0;
3500
3501	if (us >= 0x3f) {
3502		mult = 1; /* mult with 4 */
3503		shift = 2;
3504	}
3505	if (us >= 0x3f * 4) {
3506		mult = 2; /* mult with 16 */
3507		shift = 4;
3508	}
3509	if (us >= 0x3f * 16) {
3510		mult = 3; /* mult with 64 */
3511		shift = 6;
3512	}
3513
3514	*val = (mult << 6) | ((us >> shift) & 0x3f);
3515}
3516
3517
3518static int velocity_get_coalesce(struct net_device *dev,
3519				 struct ethtool_coalesce *ecmd,
3520				 struct kernel_ethtool_coalesce *kernel_coal,
3521				 struct netlink_ext_ack *extack)
3522{
3523	struct velocity_info *vptr = netdev_priv(dev);
3524
3525	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3526	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3527
3528	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3529	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3530
3531	return 0;
3532}
3533
3534static int velocity_set_coalesce(struct net_device *dev,
3535				 struct ethtool_coalesce *ecmd,
3536				 struct kernel_ethtool_coalesce *kernel_coal,
3537				 struct netlink_ext_ack *extack)
3538{
3539	struct velocity_info *vptr = netdev_priv(dev);
3540	int max_us = 0x3f * 64;
3541	unsigned long flags;
3542
3543	/* 6 bits of timer value plus a 2-bit multiplier, so the maximum is 0x3f * 64 usec */
3544	if (ecmd->tx_coalesce_usecs > max_us)
3545		return -EINVAL;
3546	if (ecmd->rx_coalesce_usecs > max_us)
3547		return -EINVAL;
3548
3549	if (ecmd->tx_max_coalesced_frames > 0xff)
3550		return -EINVAL;
3551	if (ecmd->rx_max_coalesced_frames > 0xff)
3552		return -EINVAL;
3553
3554	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3555	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3556
3557	set_pending_timer_val(&vptr->options.rxqueue_timer,
3558			ecmd->rx_coalesce_usecs);
3559	set_pending_timer_val(&vptr->options.txqueue_timer,
3560			ecmd->tx_coalesce_usecs);
3561
3562	/* Setup the interrupt suppression and queue timers */
3563	spin_lock_irqsave(&vptr->lock, flags);
3564	mac_disable_int(vptr->mac_regs);
3565	setup_adaptive_interrupts(vptr);
3566	setup_queue_timers(vptr);
3567
3568	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3569	mac_clear_isr(vptr->mac_regs);
3570	mac_enable_int(vptr->mac_regs);
3571	spin_unlock_irqrestore(&vptr->lock, flags);
3572
3573	return 0;
3574}
3575
3576static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3577	"rx_all",
3578	"rx_ok",
3579	"tx_ok",
3580	"rx_error",
3581	"rx_runt_ok",
3582	"rx_runt_err",
3583	"rx_64",
3584	"tx_64",
3585	"rx_65_to_127",
3586	"tx_65_to_127",
3587	"rx_128_to_255",
3588	"tx_128_to_255",
3589	"rx_256_to_511",
3590	"tx_256_to_511",
3591	"rx_512_to_1023",
3592	"tx_512_to_1023",
3593	"rx_1024_to_1518",
3594	"tx_1024_to_1518",
3595	"tx_ether_collisions",
3596	"rx_crc_errors",
3597	"rx_jumbo",
3598	"tx_jumbo",
3599	"rx_mac_control_frames",
3600	"tx_mac_control_frames",
3601	"rx_frame_alignment_errors",
3602	"rx_long_ok",
3603	"rx_long_err",
3604	"tx_sqe_errors",
3605	"rx_no_buf",
3606	"rx_symbol_errors",
3607	"in_range_length_errors",
3608	"late_collisions"
3609};
3610
3611static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3612{
3613	switch (sset) {
3614	case ETH_SS_STATS:
3615		memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3616		break;
3617	}
3618}
3619
3620static int velocity_get_sset_count(struct net_device *dev, int sset)
3621{
3622	switch (sset) {
3623	case ETH_SS_STATS:
3624		return ARRAY_SIZE(velocity_gstrings);
3625	default:
3626		return -EOPNOTSUPP;
3627	}
3628}
3629
3630static void velocity_get_ethtool_stats(struct net_device *dev,
3631				       struct ethtool_stats *stats, u64 *data)
3632{
3633	if (netif_running(dev)) {
3634		struct velocity_info *vptr = netdev_priv(dev);
3635		u32 *p = vptr->mib_counter;
3636		int i;
3637
3638		spin_lock_irq(&vptr->lock);
3639		velocity_update_hw_mibs(vptr);
3640		spin_unlock_irq(&vptr->lock);
3641
3642		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3643			*data++ = *p++;
3644	}
3645}
3646
3647static const struct ethtool_ops velocity_ethtool_ops = {
3648	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3649				     ETHTOOL_COALESCE_MAX_FRAMES,
3650	.get_drvinfo		= velocity_get_drvinfo,
3651	.get_wol		= velocity_ethtool_get_wol,
3652	.set_wol		= velocity_ethtool_set_wol,
3653	.get_link		= velocity_get_link,
3654	.get_strings		= velocity_get_strings,
3655	.get_sset_count		= velocity_get_sset_count,
3656	.get_ethtool_stats	= velocity_get_ethtool_stats,
3657	.get_coalesce		= velocity_get_coalesce,
3658	.set_coalesce		= velocity_set_coalesce,
3659	.begin			= velocity_ethtool_up,
3660	.complete		= velocity_ethtool_down,
3661	.get_link_ksettings	= velocity_get_link_ksettings,
3662	.set_link_ksettings	= velocity_set_link_ksettings,
3663};
3664
3665#if defined(CONFIG_PM) && defined(CONFIG_INET)
3666static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3667{
3668	struct in_ifaddr *ifa = ptr;
3669	struct net_device *dev = ifa->ifa_dev->dev;
3670
3671	if (dev_net(dev) == &init_net &&
3672	    dev->netdev_ops == &velocity_netdev_ops)
3673		velocity_get_ip(netdev_priv(dev));
3674
3675	return NOTIFY_DONE;
3676}
3677
3678static struct notifier_block velocity_inetaddr_notifier = {
3679	.notifier_call	= velocity_netdev_event,
3680};
3681
3682static void velocity_register_notifier(void)
3683{
3684	register_inetaddr_notifier(&velocity_inetaddr_notifier);
3685}
3686
3687static void velocity_unregister_notifier(void)
3688{
3689	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3690}
3691
3692#else
3693
3694#define velocity_register_notifier()	do {} while (0)
3695#define velocity_unregister_notifier()	do {} while (0)
3696
3697#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
3698
3699/**
3700 *	velocity_init_module	-	load time function
3701 *
3702 *	Called when the velocity module is loaded. The PCI driver
3703 *	is registered with the PCI layer, and in turn will call
3704 *	the probe functions for each velocity adapter installed
3705 *	in the system.
3706 */
3707static int __init velocity_init_module(void)
3708{
3709	int ret_pci, ret_platform;
3710
3711	velocity_register_notifier();
3712
3713	ret_pci = pci_register_driver(&velocity_pci_driver);
3714	ret_platform = platform_driver_register(&velocity_platform_driver);
3715
3716	/* if both registrations failed, remove the notifier */
3717	if ((ret_pci < 0) && (ret_platform < 0)) {
3718		velocity_unregister_notifier();
3719		return ret_pci;
3720	}
3721
3722	return 0;
3723}
3724
3725/**
3726 *	velocity_cleanup_module		-	module unload
3727 *
3728 *	When the velocity module is unloaded this function is called.
3729 *	It will clean up the notifiers and then unregister the PCI
3730 *	driver interface for this hardware. This in turn cleans up
3731 *	all discovered interfaces before returning from the function
3732 */
3733static void __exit velocity_cleanup_module(void)
3734{
3735	velocity_unregister_notifier();
3736
3737	pci_unregister_driver(&velocity_pci_driver);
3738	platform_driver_unregister(&velocity_platform_driver);
3739}
3740
3741module_init(velocity_init_module);
3742module_exit(velocity_cleanup_module);
 221
 222/**
 223 *	mac_wol_reset	-	reset WOL after exiting low power
 224 *	@regs: register block of this velocity
 225 *
 226 *	Called after we drop out of wake on lan mode in order to
 227 *	reset the wake on LAN features. This function doesn't restore
 228 *	the rest of the logic affected by the sleep/wakeup cycle.
 229 */
 230static void mac_wol_reset(struct mac_regs __iomem *regs)
 231{
 232
 233	/* Turn off SWPTAG right after leaving power mode */
 234	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
 235	/* clear sticky bits */
 236	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
 237
 238	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
 239	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
 240	/* disable force PME-enable */
 241	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
 242	/* disable power-event config bit */
 243	writew(0xFFFF, &regs->WOLCRClr);
 244	/* clear power status */
 245	writew(0xFFFF, &regs->WOLSRClr);
 246}
 247
 248static const struct ethtool_ops velocity_ethtool_ops;
 249
 250/*
 251    Define module options
 252*/
 253
 254MODULE_AUTHOR("VIA Networking Technologies, Inc.");
 255MODULE_LICENSE("GPL");
 256MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
 257
 258#define VELOCITY_PARAM(N, D) \
 259	static int N[MAX_UNITS] = OPTION_DEFAULT;\
 260	module_param_array(N, int, NULL, 0); \
 261	MODULE_PARM_DESC(N, D);
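/* Each VELOCITY_PARAM(N, D) use below declares a per-adapter array N[MAX_UNITS]
 * of module parameters with description D. Entries the user does not set keep
 * OPTION_DEFAULT, which the option parsers further down treat as "use the
 * driver default" (val == -1 in velocity_set_int_opt()/velocity_set_bool_opt()).
 */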
 262
 263#define RX_DESC_MIN     64
 264#define RX_DESC_MAX     255
 265#define RX_DESC_DEF     64
 266VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
 267
 268#define TX_DESC_MIN     16
 269#define TX_DESC_MAX     256
 270#define TX_DESC_DEF     64
 271VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
 272
 273#define RX_THRESH_MIN   0
 274#define RX_THRESH_MAX   3
 275#define RX_THRESH_DEF   0
 276/* rx_thresh[] is used for controlling the receive fifo threshold.
 277	   0: indicates the rxfifo threshold is 128 bytes.
 278	   1: indicates the rxfifo threshold is 512 bytes.
 279	   2: indicates the rxfifo threshold is 1024 bytes.
 280	   3: indicates the rxfifo threshold is store & forward.
 281*/
 282VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
 283
 284#define DMA_LENGTH_MIN  0
 285#define DMA_LENGTH_MAX  7
 286#define DMA_LENGTH_DEF  6
 287
 288/* DMA_length[] is used for controlling the DMA length
 289   0: 8 DWORDs
 290   1: 16 DWORDs
 291   2: 32 DWORDs
 292   3: 64 DWORDs
 293   4: 128 DWORDs
 294   5: 256 DWORDs
 295	   6: SF(flush till empty)
 296	   7: SF(flush till empty)
 297*/
 298VELOCITY_PARAM(DMA_length, "DMA length");
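/* For example, DMA_length=3 selects 64 DWORD (256 byte) DMA bursts, while
 * 6 and 7 select store & forward. */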
 299
 300#define IP_ALIG_DEF     0
 301	/* IP_byte_align[] is used for IP header DWORD byte alignment
 302	   0: indicates the IP header won't be DWORD byte aligned. (Default)
 303	   1: indicates the IP header will be DWORD byte aligned.
 304	      In some environments the IP header must be DWORD byte aligned,
 305	      or the packet will be dropped when we receive it. (e.g. IPVS)
 306*/
 307VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
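/* When enabled, velocity_iph_realign() (further down) shifts each received
 * frame by two bytes so that the IP header behind the 14 byte Ethernet
 * header ends up DWORD aligned. */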
 308
 309#define FLOW_CNTL_DEF   1
 310#define FLOW_CNTL_MIN   1
 311#define FLOW_CNTL_MAX   5
 312
 313/* flow_control[] is used for setting the flow control ability of NIC.
 314	   1: hardware default - AUTO (default). Use hardware default value in ANAR.
 315   2: enable TX flow control.
 316   3: enable RX flow control.
 317   4: enable RX/TX flow control.
 318   5: disable
 319*/
 320VELOCITY_PARAM(flow_control, "Enable flow control ability");
 321
 322#define MED_LNK_DEF 0
 323#define MED_LNK_MIN 0
 324#define MED_LNK_MAX 5
 325/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
 326	   0: indicates autonegotiation for both speed and duplex mode
 327	   1: indicates 100Mbps half duplex mode
 328	   2: indicates 100Mbps full duplex mode
 329	   3: indicates 10Mbps half duplex mode
 330	   4: indicates 10Mbps full duplex mode
 331	   5: indicates 1000Mbps full duplex mode
 332
 333   Note:
 334	   if the EEPROM has been set to force mode, this option is ignored
 335	   by the driver.
 336*/
 337VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
 338
 339#define WOL_OPT_DEF     0
 340#define WOL_OPT_MIN     0
 341#define WOL_OPT_MAX     7
 342/* wol_opts[] is used for controlling wake on lan behavior.
 343	   0: Wake up if received a magic packet. (Default)
 344	   1: Wake up if link status changes (on/off).
 345	   2: Wake up if received an ARP packet.
 346	   4: Wake up if received any unicast packet.
 347	   These values can be summed up to support more than one option.
 348*/
 349VELOCITY_PARAM(wol_opts, "Wake On Lan options");
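/* Since 1, 2 and 4 are individual wake-up sources, a value such as
 * wol_opts=6 (2 + 4) arms both ARP and unicast wake-up. */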
 350
 351static int rx_copybreak = 200;
 352module_param(rx_copybreak, int, 0644);
 353MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
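/* Illustrative usage, assuming the module is loaded as via-velocity (the
 * array parameters take one comma-separated value per adapter):
 *
 *	modprobe via-velocity DMA_length=3 wol_opts=6 rx_copybreak=256
 *
 * Out-of-range values are reported and replaced by the defaults when
 * velocity_get_options() runs. */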
 354
 355/*
 356 *	Internal board variants. At the moment we have only one
 357 */
 358static struct velocity_info_tbl chip_info_table[] = {
 359	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
 360	{ }
 361};
 362
 363/*
 364 *	Describe the PCI device identifiers that we support in this
 365 *	device driver. Used for hotplug autoloading.
 366 */
 367
 368static const struct pci_device_id velocity_pci_id_table[] = {
 369	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
 370	{ }
 371};
 372
 373MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
 374
 375/**
 376 *	Describe the OF device identifiers that we support in this
 377 *	device driver. Used for devicetree nodes.
 378 */
 379static const struct of_device_id velocity_of_ids[] = {
 380	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
 381	{ /* Sentinel */ },
 382};
 383MODULE_DEVICE_TABLE(of, velocity_of_ids);
 384
 385/**
 386 *	get_chip_name	- 	identifier to name
 387 *	@id: chip identifier
 388 *
 389 *	Given a chip identifier return a suitable description. Returns
 390 *	a pointer to a static string valid while the driver is loaded.
 391 */
 392static const char *get_chip_name(enum chip_type chip_id)
 393{
 394	int i;
 395	for (i = 0; chip_info_table[i].name != NULL; i++)
 396		if (chip_info_table[i].chip_id == chip_id)
 397			break;
 398	return chip_info_table[i].name;
 399}
 400
 401/**
 402 *	velocity_set_int_opt	-	parser for integer options
 403 *	@opt: pointer to option value
 404 *	@val: value the user requested (or -1 for default)
 405 *	@min: lowest value allowed
 406 *	@max: highest value allowed
 407 *	@def: default value
 408 *	@name: property name
 409 *
 410 *	Set an integer property in the module options. This function does
 411 *	all the verification and checking as well as reporting so that
 412 *	we don't duplicate code for each option.
 413 */
 414static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
 415				 char *name)
 416{
 417	if (val == -1)
 418		*opt = def;
 419	else if (val < min || val > max) {
 420		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
 421			  name, min, max);
 422		*opt = def;
 423	} else {
 424		pr_info("set value of parameter %s to %d\n", name, val);
 425		*opt = val;
 426	}
 427}
 428
 429/**
 430 *	velocity_set_bool_opt	-	parser for boolean options
 431 *	@opt: pointer to option value
 432 *	@val: value the user requested (or -1 for default)
 433 *	@def: default value (yes/no)
 434 *	@flag: numeric value to set for true.
 435 *	@name: property name
 436 *
 437 *	Set a boolean property in the module options. This function does
 438 *	all the verification and checking as well as reporting so that
 439 *	we don't duplicate code for each option.
 440 */
 441static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
 442				  char *name)
 443{
 444	(*opt) &= (~flag);
 445	if (val == -1)
 446		*opt |= (def ? flag : 0);
 447	else if (val < 0 || val > 1) {
 448		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
 449			  name, 0, 1);
 450		*opt |= (def ? flag : 0);
 451	} else {
 452		pr_info("set parameter %s to %s\n",
 453			name, val ? "TRUE" : "FALSE");
 454		*opt |= (val ? flag : 0);
 455	}
 456}
 457
 458/**
 459 *	velocity_get_options	-	set options on device
 460 *	@opts: option structure for the device
 461 *	@index: index of option to use in module options array
 462 *
 463 *	Turn the module and command options into a single structure
 464 *	for the current device
 465 */
 466static void velocity_get_options(struct velocity_opt *opts, int index)
 467{
 468
 469	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
 470			     RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
 471			     "rx_thresh");
 472	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
 473			     DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
 474			     "DMA_length");
 475	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
 476			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
 477			     "RxDescriptors");
 478	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
 479			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
 480			     "TxDescriptors");
 481
 482	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
 483			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
 484			     "flow_control");
 485	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
 486			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
 487			      "IP_byte_align");
 488	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
 489			     MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
 490			     "Media link mode");
 491	velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
 492			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
 493			     "Wake On Lan options");
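	/* The hardware is handed receive descriptors in blocks of four
	 * (see velocity_give_many_rx_descs()), so round the RX ring size
	 * down to a multiple of 4.
	 */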
 494	opts->numrx = (opts->numrx & ~3);
 495}
 496
 497/**
 498 *	velocity_init_cam_filter	-	initialise CAM
 499 *	@vptr: velocity to program
 500 *
 501 *	Initialize the content addressable memory used for filters. Load
 502 *	appropriately according to the presence of VLAN
 503 */
 504static void velocity_init_cam_filter(struct velocity_info *vptr)
 505{
 506	struct mac_regs __iomem *regs = vptr->mac_regs;
 507	unsigned int vid, i = 0;
 508
 509	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
 510	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
 511	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
 512
 513	/* Disable all CAMs */
 514	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
 515	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
 516	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 517	mac_set_cam_mask(regs, vptr->mCAMmask);
 518
 519	/* Enable VCAMs */
 520	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
 521		mac_set_vlan_cam(regs, i, (u8 *) &vid);
 522		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
 523		if (++i >= VCAM_SIZE)
 524			break;
 525	}
 526	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 527}
 528
 529static int velocity_vlan_rx_add_vid(struct net_device *dev,
 530				    __be16 proto, u16 vid)
 531{
 532	struct velocity_info *vptr = netdev_priv(dev);
 533
 534	spin_lock_irq(&vptr->lock);
 535	set_bit(vid, vptr->active_vlans);
 536	velocity_init_cam_filter(vptr);
 537	spin_unlock_irq(&vptr->lock);
 538	return 0;
 539}
 540
 541static int velocity_vlan_rx_kill_vid(struct net_device *dev,
 542				     __be16 proto, u16 vid)
 543{
 544	struct velocity_info *vptr = netdev_priv(dev);
 545
 546	spin_lock_irq(&vptr->lock);
 547	clear_bit(vid, vptr->active_vlans);
 548	velocity_init_cam_filter(vptr);
 549	spin_unlock_irq(&vptr->lock);
 550	return 0;
 551}
 552
 553static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
 554{
 555	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
 556}
 557
 558/**
 559 *	velocity_rx_reset	-	handle a receive reset
 560 *	@vptr: velocity we are resetting
 561 *
 562 *	Reset the ownership and status for the receive ring side.
 563 *	Hand the whole receive queue back to the NIC.
 564 */
 565static void velocity_rx_reset(struct velocity_info *vptr)
 566{
 567
 568	struct mac_regs __iomem *regs = vptr->mac_regs;
 569	int i;
 570
 571	velocity_init_rx_ring_indexes(vptr);
 572
 573	/*
 574	 *	Init state, all RD entries belong to the NIC
 575	 */
 576	for (i = 0; i < vptr->options.numrx; ++i)
 577		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
 578
 579	writew(vptr->options.numrx, &regs->RBRDU);
 580	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 581	writew(0, &regs->RDIdx);
 582	writew(vptr->options.numrx - 1, &regs->RDCSize);
 583}
 584
 585/**
 586 *	velocity_get_opt_media_mode	-	get media selection
 587 *	@vptr: velocity adapter
 588 *
 589 *	Get the media mode stored in EEPROM or module options and load
 590 *	mii_status accordingly. The requested link state information
 591 *	is also returned.
 592 */
 593static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
 594{
 595	u32 status = 0;
 596
 597	switch (vptr->options.spd_dpx) {
 598	case SPD_DPX_AUTO:
 599		status = VELOCITY_AUTONEG_ENABLE;
 600		break;
 601	case SPD_DPX_100_FULL:
 602		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
 603		break;
 604	case SPD_DPX_10_FULL:
 605		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
 606		break;
 607	case SPD_DPX_100_HALF:
 608		status = VELOCITY_SPEED_100;
 609		break;
 610	case SPD_DPX_10_HALF:
 611		status = VELOCITY_SPEED_10;
 612		break;
 613	case SPD_DPX_1000_FULL:
 614		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
 615		break;
 616	}
 617	vptr->mii_status = status;
 618	return status;
 619}
 620
 621/**
 622 *	safe_disable_mii_autopoll	-	autopoll off
 623 *	@regs: velocity registers
 624 *
 625 *	Turn off the autopoll and wait for it to disable on the chip
 626 */
 627static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
 628{
 629	u16 ww;
 630
 631	/*  turn off MAUTO */
 632	writeb(0, &regs->MIICR);
 633	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 634		udelay(1);
 635		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 636			break;
 637	}
 638}
 639
 640/**
 641 *	enable_mii_autopoll	-	turn on autopolling
 642 *	@regs: velocity registers
 643 *
 644 *	Enable the MII link status autopoll feature on the Velocity
 645 *	hardware. Wait for it to enable.
 646 */
 647static void enable_mii_autopoll(struct mac_regs __iomem *regs)
 648{
 649	int ii;
 650
 651	writeb(0, &(regs->MIICR));
 652	writeb(MIIADR_SWMPL, &regs->MIIADR);
 653
 654	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
 655		udelay(1);
 656		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 657			break;
 658	}
 659
 660	writeb(MIICR_MAUTO, &regs->MIICR);
 661
 662	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
 663		udelay(1);
 664		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
 665			break;
 666	}
 667
 668}
 669
 670/**
 671 *	velocity_mii_read	-	read MII data
 672 *	@regs: velocity registers
 673 *	@index: MII register index
 674 *	@data: buffer for received data
 675 *
 676 *	Perform a single read of an MII 16bit register. Returns zero
 677 *	on success or -ETIMEDOUT if the PHY did not respond.
 678 */
 679static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
 680{
 681	u16 ww;
 682
 683	/*
 684	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
 685	 */
 686	safe_disable_mii_autopoll(regs);
 687
 688	writeb(index, &regs->MIIADR);
 689
 690	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
 691
 692	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 693		if (!(readb(&regs->MIICR) & MIICR_RCMD))
 694			break;
 695	}
 696
 697	*data = readw(&regs->MIIDATA);
 698
 699	enable_mii_autopoll(regs);
 700	if (ww == W_MAX_TIMEOUT)
 701		return -ETIMEDOUT;
 702	return 0;
 703}
 704
 705/**
 706 *	mii_check_media_mode	-	check media state
 707 *	@regs: velocity registers
 708 *
 709 *	Check the current MII status and determine the link status
 710 *	accordingly
 711 */
 712static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
 713{
 714	u32 status = 0;
 715	u16 ANAR;
 716
 717	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
 718		status |= VELOCITY_LINK_FAIL;
 719
 720	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
 721		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
 722	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
 723		status |= (VELOCITY_SPEED_1000);
 724	else {
 725		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 726		if (ANAR & ADVERTISE_100FULL)
 727			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
 728		else if (ANAR & ADVERTISE_100HALF)
 729			status |= VELOCITY_SPEED_100;
 730		else if (ANAR & ADVERTISE_10FULL)
 731			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
 732		else
 733			status |= (VELOCITY_SPEED_10);
 734	}
 735
 736	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 737		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 738		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 739		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 740			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
 741				status |= VELOCITY_AUTONEG_ENABLE;
 742		}
 743	}
 744
 745	return status;
 746}
 747
 748/**
 749 *	velocity_mii_write	-	write MII data
 750 *	@regs: velocity registers
 751 *	@index: MII register index
 752 *	@data: 16bit data for the MII register
 753 *
 754 *	Perform a single write to an MII 16bit register. Returns zero
 755 *	on success or -ETIMEDOUT if the PHY did not respond.
 756 */
 757static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
 758{
 759	u16 ww;
 760
 761	/*
 762	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
 763	 */
 764	safe_disable_mii_autopoll(regs);
 765
 766	/* MII reg offset */
 767	writeb(mii_addr, &regs->MIIADR);
 768	/* set MII data */
 769	writew(data, &regs->MIIDATA);
 770
 771	/* turn on MIICR_WCMD */
 772	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
 773
 774	/* W_MAX_TIMEOUT is the timeout period */
 775	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
 776		udelay(5);
 777		if (!(readb(&regs->MIICR) & MIICR_WCMD))
 778			break;
 779	}
 780	enable_mii_autopoll(regs);
 781
 782	if (ww == W_MAX_TIMEOUT)
 783		return -ETIMEDOUT;
 784	return 0;
 785}
 786
 787/**
 788 *	set_mii_flow_control	-	flow control setup
 789 *	@vptr: velocity interface
 790 *
 791 *	Set up the flow control on this interface according to
 792 *	the supplied user/eeprom options.
 793 */
 794static void set_mii_flow_control(struct velocity_info *vptr)
 795{
 796	/*Enable or Disable PAUSE in ANAR */
 797	switch (vptr->options.flow_cntl) {
 798	case FLOW_CNTL_TX:
 799		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 800		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 801		break;
 802
 803	case FLOW_CNTL_RX:
 804		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 805		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 806		break;
 807
 808	case FLOW_CNTL_TX_RX:
 809		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 810		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 811		break;
 812
 813	case FLOW_CNTL_DISABLE:
 814		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 815		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 816		break;
 817	default:
 818		break;
 819	}
 820}
 821
 822/**
 823 *	mii_set_auto_on		-	autonegotiate on
 824 *	@vptr: velocity
 825 *
 826 *	Enable autonegotiation on this interface
 827 */
 828static void mii_set_auto_on(struct velocity_info *vptr)
 829{
 830	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
 831		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
 832	else
 833		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
 834}
 835
 836static u32 check_connection_type(struct mac_regs __iomem *regs)
 837{
 838	u32 status = 0;
 839	u8 PHYSR0;
 840	u16 ANAR;
 841	PHYSR0 = readb(&regs->PHYSR0);
 842
 843	/*
 844	   if (!(PHYSR0 & PHYSR0_LINKGD))
 845	   status|=VELOCITY_LINK_FAIL;
 846	 */
 847
 848	if (PHYSR0 & PHYSR0_FDPX)
 849		status |= VELOCITY_DUPLEX_FULL;
 850
 851	if (PHYSR0 & PHYSR0_SPDG)
 852		status |= VELOCITY_SPEED_1000;
 853	else if (PHYSR0 & PHYSR0_SPD10)
 854		status |= VELOCITY_SPEED_10;
 855	else
 856		status |= VELOCITY_SPEED_100;
 857
 858	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 859		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 860		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 861		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 862			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
 863				status |= VELOCITY_AUTONEG_ENABLE;
 864		}
 865	}
 866
 867	return status;
 868}
 869
 870/**
 871 *	velocity_set_media_mode		-	set media mode
 *	@vptr: velocity adapter
 872 *	@mii_status: old MII link state
 873 *
 874 *	Check the media link state and configure the PHY flow control
 875 *	and the velocity hardware setup accordingly. In particular
 876 *	we need to set up CD polling and frame bursting.
 877 */
 878static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
 879{
 880	u32 curr_status;
 881	struct mac_regs __iomem *regs = vptr->mac_regs;
 882
 883	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
 884	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
 885
 886	/* Set mii link status */
 887	set_mii_flow_control(vptr);
 888
 889	/*
 890	   Check if new status is consistent with current status
 891	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
 892	       (mii_status==curr_status)) {
 893	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
 894	   vptr->mii_status=check_connection_type(vptr->mac_regs);
 895	   netdev_info(vptr->netdev, "Velocity link no change\n");
 896	   return 0;
 897	   }
 898	 */
 899
 900	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
 901		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
 902
 903	/*
 904	 *	If connection type is AUTO
 905	 */
 906	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
 907		netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
 908		/* clear force MAC mode bit */
 909		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
 910		/* set duplex mode of MAC according to duplex mode of MII */
 911		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
 912		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
 913		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
 914
 915		/* enable AUTO-NEGO mode */
 916		mii_set_auto_on(vptr);
 917	} else {
 918		u16 CTRL1000;
 919		u16 ANAR;
 920		u8 CHIPGCR;
 921
 922		/*
 923		 * 1. if it's 3119, disable frame bursting in halfduplex mode
 924		 *    and enable it in fullduplex mode
 925		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
 926		 * 3. only enable CD heart beat counter in 10HD mode
 927		 */
 928
 929		/* set force MAC mode bit */
 930		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
 931
 932		CHIPGCR = readb(&regs->CHIPGCR);
 933
 934		if (mii_status & VELOCITY_SPEED_1000)
 935			CHIPGCR |= CHIPGCR_FCGMII;
 936		else
 937			CHIPGCR &= ~CHIPGCR_FCGMII;
 938
 939		if (mii_status & VELOCITY_DUPLEX_FULL) {
 940			CHIPGCR |= CHIPGCR_FCFDX;
 941			writeb(CHIPGCR, &regs->CHIPGCR);
 942			netdev_info(vptr->netdev,
 943				    "set Velocity to forced full mode\n");
 944			if (vptr->rev_id < REV_ID_VT3216_A0)
 945				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
 946		} else {
 947			CHIPGCR &= ~CHIPGCR_FCFDX;
 948			netdev_info(vptr->netdev,
 949				    "set Velocity to forced half mode\n");
 950			writeb(CHIPGCR, &regs->CHIPGCR);
 951			if (vptr->rev_id < REV_ID_VT3216_A0)
 952				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
 953		}
 954
 955		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
 956		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
 957		if ((mii_status & VELOCITY_SPEED_1000) &&
 958		    (mii_status & VELOCITY_DUPLEX_FULL)) {
 959			CTRL1000 |= ADVERTISE_1000FULL;
 960		}
 961		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
 962
 963		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
 964			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
 965		else
 966			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
 967
 968		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
 969		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
 970		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
 971		if (mii_status & VELOCITY_SPEED_100) {
 972			if (mii_status & VELOCITY_DUPLEX_FULL)
 973				ANAR |= ADVERTISE_100FULL;
 974			else
 975				ANAR |= ADVERTISE_100HALF;
 976		} else if (mii_status & VELOCITY_SPEED_10) {
 977			if (mii_status & VELOCITY_DUPLEX_FULL)
 978				ANAR |= ADVERTISE_10FULL;
 979			else
 980				ANAR |= ADVERTISE_10HALF;
 981		}
 982		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
 983		/* enable AUTO-NEGO mode */
 984		mii_set_auto_on(vptr);
 985		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
 986	}
 987	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
 988	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
 989	return VELOCITY_LINK_CHANGE;
 990}
 991
 992/**
 993 *	velocity_print_link_status	-	link status reporting
 994 *	@vptr: velocity to report on
 995 *
 996 *	Turn the link status of the velocity card into a kernel log
 997 *	description of the new link state, detailing speed and duplex
 998 *	status
 999 */
1000static void velocity_print_link_status(struct velocity_info *vptr)
1001{
1002	const char *link;
1003	const char *speed;
1004	const char *duplex;
1005
1006	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1007		netdev_notice(vptr->netdev, "failed to detect cable link\n");
1008		return;
1009	}
1010
1011	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1012		link = "auto-negotiation";
1013
1014		if (vptr->mii_status & VELOCITY_SPEED_1000)
1015			speed = "1000";
1016		else if (vptr->mii_status & VELOCITY_SPEED_100)
1017			speed = "100";
1018		else
1019			speed = "10";
1020
1021		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1022			duplex = "full";
1023		else
1024			duplex = "half";
1025	} else {
1026		link = "forced";
1027
1028		switch (vptr->options.spd_dpx) {
1029		case SPD_DPX_1000_FULL:
1030			speed = "1000";
1031			duplex = "full";
1032			break;
1033		case SPD_DPX_100_HALF:
1034			speed = "100";
1035			duplex = "half";
1036			break;
1037		case SPD_DPX_100_FULL:
1038			speed = "100";
1039			duplex = "full";
1040			break;
1041		case SPD_DPX_10_HALF:
1042			speed = "10";
1043			duplex = "half";
1044			break;
1045		case SPD_DPX_10_FULL:
1046			speed = "10";
1047			duplex = "full";
1048			break;
1049		default:
1050			speed = "unknown";
1051			duplex = "unknown";
1052			break;
1053		}
1054	}
1055	netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
1056		      link, speed, duplex);
1057}
1058
1059/**
1060 *	enable_flow_control_ability	-	flow control
1061 *	@vptr: velocity to configure
1062 *
1063 *	Set up flow control according to the flow control options
1064 *	determined by the eeprom/configuration.
1065 */
1066static void enable_flow_control_ability(struct velocity_info *vptr)
1067{
1068
1069	struct mac_regs __iomem *regs = vptr->mac_regs;
1070
1071	switch (vptr->options.flow_cntl) {
1072
1073	case FLOW_CNTL_DEFAULT:
1074		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1075			writel(CR0_FDXRFCEN, &regs->CR0Set);
1076		else
1077			writel(CR0_FDXRFCEN, &regs->CR0Clr);
1078
1079		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1080			writel(CR0_FDXTFCEN, &regs->CR0Set);
1081		else
1082			writel(CR0_FDXTFCEN, &regs->CR0Clr);
1083		break;
1084
1085	case FLOW_CNTL_TX:
1086		writel(CR0_FDXTFCEN, &regs->CR0Set);
1087		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1088		break;
1089
1090	case FLOW_CNTL_RX:
1091		writel(CR0_FDXRFCEN, &regs->CR0Set);
1092		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1093		break;
1094
1095	case FLOW_CNTL_TX_RX:
1096		writel(CR0_FDXTFCEN, &regs->CR0Set);
1097		writel(CR0_FDXRFCEN, &regs->CR0Set);
1098		break;
1099
1100	case FLOW_CNTL_DISABLE:
1101		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1102		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1103		break;
1104
1105	default:
1106		break;
1107	}
1108
1109}
1110
1111/**
1112 *	velocity_soft_reset	-	soft reset
1113 *	@vptr: velocity to reset
1114 *
1115 *	Kick off a soft reset of the velocity adapter and then poll
1116 *	until the reset sequence has completed before returning.
1117 */
1118static int velocity_soft_reset(struct velocity_info *vptr)
1119{
1120	struct mac_regs __iomem *regs = vptr->mac_regs;
1121	int i = 0;
1122
1123	writel(CR0_SFRST, &regs->CR0Set);
1124
1125	for (i = 0; i < W_MAX_TIMEOUT; i++) {
1126		udelay(5);
1127		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1128			break;
1129	}
1130
1131	if (i == W_MAX_TIMEOUT) {
1132		writel(CR0_FORSRST, &regs->CR0Set);
1133		/* FIXME: PCI POSTING */
1134		/* delay 2ms */
1135		mdelay(2);
1136	}
1137	return 0;
1138}
1139
1140/**
1141 *	velocity_set_multi	-	filter list change callback
1142 *	@dev: network device
1143 *
1144 *	Called by the network layer when the filter lists need to change
1145 *	for a velocity adapter. Reload the CAMs with the new address
1146 *	filter ruleset.
1147 */
1148static void velocity_set_multi(struct net_device *dev)
1149{
1150	struct velocity_info *vptr = netdev_priv(dev);
1151	struct mac_regs __iomem *regs = vptr->mac_regs;
1152	u8 rx_mode;
1153	int i;
1154	struct netdev_hw_addr *ha;
1155
1156	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1157		writel(0xffffffff, &regs->MARCAM[0]);
1158		writel(0xffffffff, &regs->MARCAM[4]);
1159		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1160	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1161		   (dev->flags & IFF_ALLMULTI)) {
1162		writel(0xffffffff, &regs->MARCAM[0]);
1163		writel(0xffffffff, &regs->MARCAM[4]);
1164		rx_mode = (RCR_AM | RCR_AB);
1165	} else {
1166		int offset = MCAM_SIZE - vptr->multicast_limit;
1167		mac_get_cam_mask(regs, vptr->mCAMmask);
1168
1169		i = 0;
1170		netdev_for_each_mc_addr(ha, dev) {
1171			mac_set_cam(regs, i + offset, ha->addr);
1172			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1173			i++;
1174		}
1175
1176		mac_set_cam_mask(regs, vptr->mCAMmask);
1177		rx_mode = RCR_AM | RCR_AB | RCR_AP;
1178	}
1179	if (dev->mtu > 1500)
1180		rx_mode |= RCR_AL;
1181
1182	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1183
1184}
1185
1186/*
1187 * MII access , media link mode setting functions
1188 */
1189
1190/**
1191 *	mii_init	-	set up MII
1192 *	@vptr: velocity adapter
1193 *	@mii_status: link status
1194 *
1195 *	Set up the PHY for the current link state.
1196 */
1197static void mii_init(struct velocity_info *vptr, u32 mii_status)
1198{
1199	u16 BMCR;
1200
1201	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1202	case PHYID_ICPLUS_IP101A:
1203		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
1204						MII_ADVERTISE, vptr->mac_regs);
1205		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1206			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
1207								vptr->mac_regs);
1208		else
1209			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
1210								vptr->mac_regs);
1211		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1212		break;
1213	case PHYID_CICADA_CS8201:
1214		/*
1215		 *	Reset to hardware default
1216		 */
1217		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1218		/*
1219		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
1220		 *	off in NWay-forced half mode for the NWay-forced vs.
1221		 *	legacy-forced issue.
1222		 */
1223		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1224			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1225		else
1226			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1227		/*
1228		 *	Turn on Link/Activity LED enable bit for CIS8201
1229		 */
1230		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1231		break;
1232	case PHYID_VT3216_32BIT:
1233	case PHYID_VT3216_64BIT:
1234		/*
1235		 *	Reset to hardware default
1236		 */
1237		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1238		/*
1239		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
1240		 *	off in NWay-forced half mode for the NWay-forced vs.
1241		 *	legacy-forced issue.
1242		 */
1243		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1244			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1245		else
1246			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1247		break;
1248
1249	case PHYID_MARVELL_1000:
1250	case PHYID_MARVELL_1000S:
1251		/*
1252		 *	Assert CRS on Transmit
1253		 */
1254		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1255		/*
1256		 *	Reset to hardware default
1257		 */
1258		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1259		break;
1260	default:
1261		;
1262	}
1263	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1264	if (BMCR & BMCR_ISOLATE) {
1265		BMCR &= ~BMCR_ISOLATE;
1266		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1267	}
1268}
1269
1270/**
1271 * setup_queue_timers	-	Setup interrupt timers
 * @vptr: velocity adapter
1272 *
1273 * Setup interrupt frequency during suppression (timeout if the frame
1274 * count isn't filled).
1275 */
1276static void setup_queue_timers(struct velocity_info *vptr)
1277{
1278	/* Only for newer revisions */
1279	if (vptr->rev_id >= REV_ID_VT3216_A0) {
1280		u8 txqueue_timer = 0;
1281		u8 rxqueue_timer = 0;
1282
1283		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1284				VELOCITY_SPEED_100)) {
1285			txqueue_timer = vptr->options.txqueue_timer;
1286			rxqueue_timer = vptr->options.rxqueue_timer;
1287		}
1288
1289		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1290		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1291	}
1292}
1293
1294/**
1295 * setup_adaptive_interrupts  -  Setup interrupt suppression
1296 *
1297 * @vptr: velocity adapter
1298 *
1299 * The velocity is able to suppress interrupt during high interrupt load.
1300 * This function turns on that feature.
1301 */
1302static void setup_adaptive_interrupts(struct velocity_info *vptr)
1303{
1304	struct mac_regs __iomem *regs = vptr->mac_regs;
1305	u16 tx_intsup = vptr->options.tx_intsup;
1306	u16 rx_intsup = vptr->options.rx_intsup;
1307
1308	/* Setup default interrupt mask (will be changed below) */
1309	vptr->int_mask = INT_MASK_DEF;
1310
1311	/* Set Tx Interrupt Suppression Threshold */
1312	writeb(CAMCR_PS0, &regs->CAMCR);
1313	if (tx_intsup != 0) {
1314		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1315				ISR_PTX2I | ISR_PTX3I);
1316		writew(tx_intsup, &regs->ISRCTL);
1317	} else
1318		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1319
1320	/* Set Rx Interrupt Suppression Threshold */
1321	writeb(CAMCR_PS1, &regs->CAMCR);
1322	if (rx_intsup != 0) {
1323		vptr->int_mask &= ~ISR_PRXI;
1324		writew(rx_intsup, &regs->ISRCTL);
1325	} else
1326		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1327
1328	/* Select page to interrupt hold timer */
1329	writeb(0, &regs->CAMCR);
1330}
1331
1332/**
1333 *	velocity_init_registers	-	initialise MAC registers
1334 *	@vptr: velocity to init
1335 *	@type: type of initialisation (hot or cold)
1336 *
1337 *	Initialise the MAC on a reset or on first set up on the
1338 *	hardware.
1339 */
1340static void velocity_init_registers(struct velocity_info *vptr,
1341				    enum velocity_init_type type)
1342{
1343	struct mac_regs __iomem *regs = vptr->mac_regs;
1344	struct net_device *netdev = vptr->netdev;
1345	int i, mii_status;
1346
1347	mac_wol_reset(regs);
1348
1349	switch (type) {
1350	case VELOCITY_INIT_RESET:
1351	case VELOCITY_INIT_WOL:
1352
1353		netif_stop_queue(netdev);
1354
1355		/*
1356		 *	Reset RX to keep the RX pointer on a 4X (multiple of 4) location
1357		 */
1358		velocity_rx_reset(vptr);
1359		mac_rx_queue_run(regs);
1360		mac_rx_queue_wake(regs);
1361
1362		mii_status = velocity_get_opt_media_mode(vptr);
1363		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1364			velocity_print_link_status(vptr);
1365			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1366				netif_wake_queue(netdev);
1367		}
1368
1369		enable_flow_control_ability(vptr);
1370
1371		mac_clear_isr(regs);
1372		writel(CR0_STOP, &regs->CR0Clr);
1373		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1374							&regs->CR0Set);
1375
1376		break;
1377
1378	case VELOCITY_INIT_COLD:
1379	default:
1380		/*
1381		 *	Do reset
1382		 */
1383		velocity_soft_reset(vptr);
1384		mdelay(5);
1385
1386		if (!vptr->no_eeprom) {
1387			mac_eeprom_reload(regs);
1388			for (i = 0; i < 6; i++)
1389				writeb(netdev->dev_addr[i], regs->PAR + i);
1390		}
1391
1392		/*
1393		 *	clear Pre_ACPI bit.
1394		 */
1395		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1396		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1397		mac_set_dma_length(regs, vptr->options.DMA_length);
1398
1399		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1400		/*
1401		 *	Back off algorithm use original IEEE standard
1402		 */
1403		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1404
1405		/*
1406		 *	Init CAM filter
1407		 */
1408		velocity_init_cam_filter(vptr);
1409
1410		/*
1411		 *	Set packet filter: Receive directed and broadcast address
1412		 */
1413		velocity_set_multi(netdev);
1414
1415		/*
1416		 *	Enable MII auto-polling
1417		 */
1418		enable_mii_autopoll(regs);
1419
1420		setup_adaptive_interrupts(vptr);
1421
1422		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1423		writew(vptr->options.numrx - 1, &regs->RDCSize);
1424		mac_rx_queue_run(regs);
1425		mac_rx_queue_wake(regs);
1426
1427		writew(vptr->options.numtx - 1, &regs->TDCSize);
1428
1429		for (i = 0; i < vptr->tx.numq; i++) {
1430			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1431			mac_tx_queue_run(regs, i);
1432		}
1433
1434		init_flow_control_register(vptr);
1435
1436		writel(CR0_STOP, &regs->CR0Clr);
1437		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1438
1439		mii_status = velocity_get_opt_media_mode(vptr);
1440		netif_stop_queue(netdev);
1441
1442		mii_init(vptr, mii_status);
1443
1444		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1445			velocity_print_link_status(vptr);
1446			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1447				netif_wake_queue(netdev);
1448		}
1449
1450		enable_flow_control_ability(vptr);
1451		mac_hw_mibs_init(regs);
1452		mac_write_int_mask(vptr->int_mask, regs);
1453		mac_clear_isr(regs);
1454
1455	}
1456}
1457
1458static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1459{
1460	struct mac_regs __iomem *regs = vptr->mac_regs;
1461	int avail, dirty, unusable;
1462
1463	/*
1464	 * RD number must be equal to 4X per hardware spec
1465	 * (programming guide rev 1.20, p.13)
1466	 */
1467	if (vptr->rx.filled < 4)
1468		return;
1469
1470	wmb();
1471
1472	unusable = vptr->rx.filled & 0x0003;
1473	dirty = vptr->rx.dirty - unusable;
1474	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1475		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1476		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1477	}
1478
1479	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1480	vptr->rx.filled = unusable;
1481}
1482
1483/**
1484 *	velocity_init_dma_rings	-	set up DMA rings
1485 *	@vptr: Velocity to set up
1486 *
1487 *	Allocate PCI mapped DMA rings for the receive and transmit layer
1488 *	to use.
1489 */
1490static int velocity_init_dma_rings(struct velocity_info *vptr)
1491{
1492	struct velocity_opt *opt = &vptr->options;
1493	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1494	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1495	dma_addr_t pool_dma;
1496	void *pool;
1497	unsigned int i;
1498
1499	/*
1500	 * Allocate all RD/TD rings in a single pool.
1501	 *
1502	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
1503	 * alignment
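	 *
	 * The code below slices the pool as:
	 *   [ RX ring | TX ring 0 | ... | TX ring (tx.numq - 1) ]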
1504	 */
1505	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1506				    rx_ring_size, &pool_dma, GFP_ATOMIC);
1507	if (!pool) {
1508		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1509			vptr->netdev->name);
1510		return -ENOMEM;
1511	}
1512
1513	vptr->rx.ring = pool;
1514	vptr->rx.pool_dma = pool_dma;
1515
1516	pool += rx_ring_size;
1517	pool_dma += rx_ring_size;
1518
1519	for (i = 0; i < vptr->tx.numq; i++) {
1520		vptr->tx.rings[i] = pool;
1521		vptr->tx.pool_dma[i] = pool_dma;
1522		pool += tx_ring_size;
1523		pool_dma += tx_ring_size;
1524	}
1525
1526	return 0;
1527}
1528
1529static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1530{
1531	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1532}
1533
1534/**
1535 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
1536 *	@vptr: velocity
1537 *	@idx: ring index
1538 *
1539 *	Allocate a new full sized buffer for the reception of a frame and
1540 *	map it into PCI space for the hardware to use. The hardware
1541 *	requires *64* byte alignment of the buffer which makes life
1542 *	less fun than would be ideal.
1543 */
1544static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1545{
1546	struct rx_desc *rd = &(vptr->rx.ring[idx]);
1547	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1548
1549	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1550	if (rd_info->skb == NULL)
1551		return -ENOMEM;
1552
1553	/*
1554	 *	Do the gymnastics to get the buffer head for data at
1555	 *	64byte alignment.
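	 *	For example, if skb->data happened to land at ...0x824, the
	 *	reserve below would be 64 - 0x24 = 28 bytes, moving the data
	 *	pointer to the 64-byte aligned address ...0x840.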
1556	 */
1557	skb_reserve(rd_info->skb,
1558			64 - ((unsigned long) rd_info->skb->data & 63));
1559	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1560					vptr->rx.buf_sz, DMA_FROM_DEVICE);
1561
1562	/*
1563	 *	Fill in the descriptor to match
1564	 */
1565
1566	*((u32 *) & (rd->rdesc0)) = 0;
1567	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1568	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1569	rd->pa_high = 0;
1570	return 0;
1571}
1572
1573
1574static int velocity_rx_refill(struct velocity_info *vptr)
1575{
1576	int dirty = vptr->rx.dirty, done = 0;
1577
1578	do {
1579		struct rx_desc *rd = vptr->rx.ring + dirty;
1580
1581		/* Fine for an all zero Rx desc at init time as well */
1582		if (rd->rdesc0.len & OWNED_BY_NIC)
1583			break;
1584
1585		if (!vptr->rx.info[dirty].skb) {
1586			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1587				break;
1588		}
1589		done++;
1590		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1591	} while (dirty != vptr->rx.curr);
1592
1593	if (done) {
1594		vptr->rx.dirty = dirty;
1595		vptr->rx.filled += done;
1596	}
1597
1598	return done;
1599}
1600
1601/**
1602 *	velocity_free_rd_ring	-	free receive ring
1603 *	@vptr: velocity to clean up
1604 *
1605 *	Free the receive buffers for each ring slot and any
1606 *	attached socket buffers that need to go away.
1607 */
1608static void velocity_free_rd_ring(struct velocity_info *vptr)
1609{
1610	int i;
1611
1612	if (vptr->rx.info == NULL)
1613		return;
1614
1615	for (i = 0; i < vptr->options.numrx; i++) {
1616		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1617		struct rx_desc *rd = vptr->rx.ring + i;
1618
1619		memset(rd, 0, sizeof(*rd));
1620
1621		if (!rd_info->skb)
1622			continue;
1623		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1624				 DMA_FROM_DEVICE);
1625		rd_info->skb_dma = 0;
1626
1627		dev_kfree_skb(rd_info->skb);
1628		rd_info->skb = NULL;
1629	}
1630
1631	kfree(vptr->rx.info);
1632	vptr->rx.info = NULL;
1633}
1634
1635/**
1636 *	velocity_init_rd_ring	-	set up receive ring
1637 *	@vptr: velocity to configure
1638 *
1639 *	Allocate and set up the receive buffers for each ring slot and
1640 *	assign them to the network adapter.
1641 */
1642static int velocity_init_rd_ring(struct velocity_info *vptr)
1643{
1644	int ret = -ENOMEM;
1645
1646	vptr->rx.info = kcalloc(vptr->options.numrx,
1647				sizeof(struct velocity_rd_info), GFP_KERNEL);
1648	if (!vptr->rx.info)
1649		goto out;
1650
1651	velocity_init_rx_ring_indexes(vptr);
1652
1653	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1654		netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
1655		velocity_free_rd_ring(vptr);
1656		goto out;
1657	}
1658
1659	ret = 0;
1660out:
1661	return ret;
1662}
1663
1664/**
1665 *	velocity_init_td_ring	-	set up transmit ring
1666 *	@vptr:	velocity
1667 *
1668 *	Set up the transmit ring and chain the ring pointers together.
1669 *	Returns zero on success or a negative posix errno code for
1670 *	failure.
1671 */
1672static int velocity_init_td_ring(struct velocity_info *vptr)
1673{
1674	int j;
1675
1676	/* Init the TD ring entries */
1677	for (j = 0; j < vptr->tx.numq; j++) {
1678
1679		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1680					    sizeof(struct velocity_td_info),
1681					    GFP_KERNEL);
1682		if (!vptr->tx.infos[j])	{
1683			while (--j >= 0)
1684				kfree(vptr->tx.infos[j]);
1685			return -ENOMEM;
1686		}
1687
1688		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1689	}
1690	return 0;
1691}
1692
1693/**
1694 *	velocity_free_dma_rings	-	free PCI ring pointers
1695 *	@vptr: Velocity to free from
1696 *
1697 *	Clean up the PCI ring buffers allocated to this velocity.
1698 */
1699static void velocity_free_dma_rings(struct velocity_info *vptr)
1700{
1701	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1702		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1703
1704	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1705}
1706
1707static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1708{
1709	int ret;
1710
1711	velocity_set_rxbufsize(vptr, mtu);
1712
1713	ret = velocity_init_dma_rings(vptr);
1714	if (ret < 0)
1715		goto out;
1716
1717	ret = velocity_init_rd_ring(vptr);
1718	if (ret < 0)
1719		goto err_free_dma_rings_0;
1720
1721	ret = velocity_init_td_ring(vptr);
1722	if (ret < 0)
1723		goto err_free_rd_ring_1;
1724out:
1725	return ret;
1726
1727err_free_rd_ring_1:
1728	velocity_free_rd_ring(vptr);
1729err_free_dma_rings_0:
1730	velocity_free_dma_rings(vptr);
1731	goto out;
1732}
1733
1734/**
1735 *	velocity_free_tx_buf	-	free transmit buffer
1736 *	@vptr: velocity
1737 *	@tdinfo: buffer
 *	@td: transmit descriptor being completed
1738 *
1739 *	Release a transmit buffer. If the buffer was preallocated then
1740 *	recycle it, if not then unmap the buffer.
1741 */
1742static void velocity_free_tx_buf(struct velocity_info *vptr,
1743		struct velocity_td_info *tdinfo, struct tx_desc *td)
1744{
1745	struct sk_buff *skb = tdinfo->skb;
1746	int i;
1747
1748	/*
1749	 *	Don't unmap the pre-allocated tx_bufs
1750	 */
1751	for (i = 0; i < tdinfo->nskb_dma; i++) {
1752		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1753
1754		/* For scatter-gather */
1755		if (skb_shinfo(skb)->nr_frags > 0)
1756			pktlen = max_t(size_t, pktlen,
1757				       td->td_buf[i].size & ~TD_QUEUE);
1758
1759		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1760				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1761	}
1762	dev_consume_skb_irq(skb);
1763	tdinfo->skb = NULL;
1764}
1765
1766/*
1767 *	FIXME: could we merge this with velocity_free_tx_buf ?
1768 */
1769static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1770							 int q, int n)
1771{
1772	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1773	int i;
1774
1775	if (td_info == NULL)
1776		return;
1777
1778	if (td_info->skb) {
1779		for (i = 0; i < td_info->nskb_dma; i++) {
1780			if (td_info->skb_dma[i]) {
1781				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1782					td_info->skb->len, DMA_TO_DEVICE);
1783				td_info->skb_dma[i] = 0;
1784			}
1785		}
1786		dev_kfree_skb(td_info->skb);
1787		td_info->skb = NULL;
1788	}
1789}
1790
1791/**
1792 *	velocity_free_td_ring	-	free td ring
1793 *	@vptr: velocity
1794 *
1795 *	Free up the transmit ring for this particular velocity adapter.
1796 *	We free the ring contents but not the ring itself.
1797 */
1798static void velocity_free_td_ring(struct velocity_info *vptr)
1799{
1800	int i, j;
1801
1802	for (j = 0; j < vptr->tx.numq; j++) {
1803		if (vptr->tx.infos[j] == NULL)
1804			continue;
1805		for (i = 0; i < vptr->options.numtx; i++)
1806			velocity_free_td_ring_entry(vptr, j, i);
1807
1808		kfree(vptr->tx.infos[j]);
1809		vptr->tx.infos[j] = NULL;
1810	}
1811}
1812
1813static void velocity_free_rings(struct velocity_info *vptr)
1814{
1815	velocity_free_td_ring(vptr);
1816	velocity_free_rd_ring(vptr);
1817	velocity_free_dma_rings(vptr);
1818}
1819
1820/**
1821 *	velocity_error	-	handle error from controller
1822 *	@vptr: velocity
1823 *	@status: card status
1824 *
1825 *	Process an error report from the hardware and attempt to recover
1826 *	the card itself. At the moment we cannot recover from some
1827 *	theoretically impossible errors but this could be fixed using
1828 *	the pci_device_failed logic to bounce the hardware
1829 *
1830 */
1831static void velocity_error(struct velocity_info *vptr, int status)
1832{
1833
1834	if (status & ISR_TXSTLI) {
1835		struct mac_regs __iomem *regs = vptr->mac_regs;
1836
1837		netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
1838			   readw(&regs->TDIdx[0]));
1839		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1840		writew(TRDCSR_RUN, &regs->TDCSRClr);
1841		netif_stop_queue(vptr->netdev);
1842
1843		/* FIXME: port over the pci_device_failed code and use it
1844		   here */
1845	}
1846
1847	if (status & ISR_SRCI) {
1848		struct mac_regs __iomem *regs = vptr->mac_regs;
1849		int linked;
1850
1851		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1852			vptr->mii_status = check_connection_type(regs);
1853
1854			/*
1855			 *	If it is a 3119, disable frame bursting in
1856			 *	halfduplex mode and enable it in fullduplex
1857			 *	 mode
1858			 */
1859			if (vptr->rev_id < REV_ID_VT3216_A0) {
1860				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1861					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1862				else
1863					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1864			}
1865			/*
1866			 *	Only enable CD heart beat counter in 10HD mode
1867			 */
1868			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1869				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1870			else
1871				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1872
1873			setup_queue_timers(vptr);
1874		}
1875		/*
1876		 *	Get link status from PHYSR0
1877		 */
1878		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1879
1880		if (linked) {
1881			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1882			netif_carrier_on(vptr->netdev);
1883		} else {
1884			vptr->mii_status |= VELOCITY_LINK_FAIL;
1885			netif_carrier_off(vptr->netdev);
1886		}
1887
1888		velocity_print_link_status(vptr);
1889		enable_flow_control_ability(vptr);
1890
1891		/*
1892		 *	Re-enable auto-polling because SRCI will disable
1893		 *	auto-polling
1894		 */
1895
1896		enable_mii_autopoll(regs);
1897
1898		if (vptr->mii_status & VELOCITY_LINK_FAIL)
1899			netif_stop_queue(vptr->netdev);
1900		else
1901			netif_wake_queue(vptr->netdev);
1902
1903	}
1904	if (status & ISR_MIBFI)
1905		velocity_update_hw_mibs(vptr);
1906	if (status & ISR_LSTEI)
1907		mac_rx_queue_wake(vptr->mac_regs);
1908}
1909
1910/**
1911 *	velocity_tx_srv		-	transmit interrupt service
1912 *	@vptr: velocity adapter
1913 *
1914 *	Scan the queues looking for transmitted packets that
1915 *	we can complete and clean up. Update any statistics as
1916 *	necessary.
1917 */
1918static int velocity_tx_srv(struct velocity_info *vptr)
1919{
1920	struct tx_desc *td;
1921	int qnum;
1922	int full = 0;
1923	int idx;
1924	int works = 0;
1925	struct velocity_td_info *tdinfo;
1926	struct net_device_stats *stats = &vptr->netdev->stats;
1927
1928	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1929		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1930			idx = (idx + 1) % vptr->options.numtx) {
1931
1932			/*
1933			 *	Get Tx Descriptor
1934			 */
1935			td = &(vptr->tx.rings[qnum][idx]);
1936			tdinfo = &(vptr->tx.infos[qnum][idx]);
1937
1938			if (td->tdesc0.len & OWNED_BY_NIC)
1939				break;
1940
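			/* bound the work done per service call: complete at
			 * most ~16 descriptors before bailing out */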
1941			if ((works++ > 15))
1942				break;
1943
1944			if (td->tdesc0.TSR & TSR0_TERR) {
1945				stats->tx_errors++;
1946				stats->tx_dropped++;
1947				if (td->tdesc0.TSR & TSR0_CDH)
1948					stats->tx_heartbeat_errors++;
1949				if (td->tdesc0.TSR & TSR0_CRS)
1950					stats->tx_carrier_errors++;
1951				if (td->tdesc0.TSR & TSR0_ABT)
1952					stats->tx_aborted_errors++;
1953				if (td->tdesc0.TSR & TSR0_OWC)
1954					stats->tx_window_errors++;
1955			} else {
1956				stats->tx_packets++;
1957				stats->tx_bytes += tdinfo->skb->len;
1958			}
1959			velocity_free_tx_buf(vptr, tdinfo, td);
1960			vptr->tx.used[qnum]--;
1961		}
1962		vptr->tx.tail[qnum] = idx;
1963
1964		if (AVAIL_TD(vptr, qnum) < 1)
1965			full = 1;
1966	}
1967	/*
1968	 *	Look to see if we should kick the transmit network
1969	 *	layer for more work.
1970	 */
1971	if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1972	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1973		netif_wake_queue(vptr->netdev);
1974	}
1975	return works;
1976}
1977
1978/**
1979 *	velocity_rx_csum	-	checksum process
1980 *	@rd: receive packet descriptor
1981 *	@skb: network layer packet buffer
1982 *
1983 *	Process the status bits for the received packet and determine
1984 *	if the checksum was computed and verified by the hardware
1985 */
1986static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1987{
1988	skb_checksum_none_assert(skb);
1989
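	/*
	 * Report CHECKSUM_UNNECESSARY only when the hardware validated
	 * the IP header checksum and, for TCP/UDP frames, the transport
	 * checksum as well.
	 */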
1990	if (rd->rdesc1.CSM & CSM_IPKT) {
1991		if (rd->rdesc1.CSM & CSM_IPOK) {
1992			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1993					(rd->rdesc1.CSM & CSM_UDPKT)) {
1994				if (!(rd->rdesc1.CSM & CSM_TUPOK))
1995					return;
1996			}
1997			skb->ip_summed = CHECKSUM_UNNECESSARY;
1998		}
1999	}
2000}
2001
2002/**
2003 *	velocity_rx_copy	-	in place Rx copy for small packets
2004 *	@rx_skb: network layer packet buffer candidate
2005 *	@pkt_size: received data size
2006 *	@vptr: velocity adapter we are handling
2008 *
2009 *	Replace the current skb that is scheduled for Rx processing by a
2010 *	shorter, immediately allocated skb, if the received packet is small
2011 *	enough. This function returns a negative value if the received
2012 *	packet is too big or if memory is exhausted.
2013 */
2014static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2015			    struct velocity_info *vptr)
2016{
2017	int ret = -1;
2018	if (pkt_size < rx_copybreak) {
2019		struct sk_buff *new_skb;
2020
2021		new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2022		if (new_skb) {
2023			new_skb->ip_summed = rx_skb[0]->ip_summed;
2024			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2025			*rx_skb = new_skb;
2026			ret = 0;
2027		}
2028
2029	}
2030	return ret;
2031}
2032
2033/**
2034 *	velocity_iph_realign	-	IP header alignment
2035 *	@vptr: velocity we are handling
2036 *	@skb: network layer packet buffer
2037 *	@pkt_size: received data size
2038 *
2039 *	Align IP header on a 2 bytes boundary. This behavior can be
2040 *	configured by the user.
2041 */
2042static inline void velocity_iph_realign(struct velocity_info *vptr,
2043					struct sk_buff *skb, int pkt_size)
2044{
2045	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2046		memmove(skb->data + 2, skb->data, pkt_size);
2047		skb_reserve(skb, 2);
2048	}
2049}
2050
2051/**
2052 *	velocity_receive_frame	-	received packet processor
2053 *	@vptr: velocity we are handling
2054 *	@idx: ring index
2055 *
2056 *	A packet has arrived. We process the packet and if appropriate
2057 *	pass the frame up the network stack
2058 */
2059static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2060{
2061	struct net_device_stats *stats = &vptr->netdev->stats;
2062	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2063	struct rx_desc *rd = &(vptr->rx.ring[idx]);
2064	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2065	struct sk_buff *skb;
2066
2067	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2068		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2069			netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
2070		stats->rx_length_errors++;
2071		return -EINVAL;
2072	}
2073
2074	if (rd->rdesc0.RSR & RSR_MAR)
2075		stats->multicast++;
2076
2077	skb = rd_info->skb;
2078
2079	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2080				    vptr->rx.buf_sz, DMA_FROM_DEVICE);
2081
2082	velocity_rx_csum(rd, skb);
2083
2084	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2085		velocity_iph_realign(vptr, skb, pkt_len);
2086		rd_info->skb = NULL;
2087		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2088				 DMA_FROM_DEVICE);
2089	} else {
2090		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2091					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
2092	}
2093
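	/* Trim off the 4 byte frame checksum the hardware leaves on the end */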
2094	skb_put(skb, pkt_len - 4);
2095	skb->protocol = eth_type_trans(skb, vptr->netdev);
2096
2097	if (rd->rdesc0.RSR & RSR_DETAG) {
2098		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2099
2100		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2101	}
2102	netif_receive_skb(skb);
2103
2104	stats->rx_bytes += pkt_len;
2105	stats->rx_packets++;
2106
2107	return 0;
2108}
2109
2110/**
2111 *	velocity_rx_srv		-	service RX interrupt
2112 *	@vptr: velocity
 *	@budget_left: remaining RX budget to process
2113 *
2114 *	Walk the receive ring of the velocity adapter and remove
2115 *	any received packets from the receive queue. Hand the ring
2116 *	slots back to the adapter for reuse.
2117 */
2118static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2119{
2120	struct net_device_stats *stats = &vptr->netdev->stats;
2121	int rd_curr = vptr->rx.curr;
2122	int works = 0;
2123
2124	while (works < budget_left) {
2125		struct rx_desc *rd = vptr->rx.ring + rd_curr;
2126
2127		if (!vptr->rx.info[rd_curr].skb)
2128			break;
2129
2130		if (rd->rdesc0.len & OWNED_BY_NIC)
2131			break;
2132
2133		rmb();
2134
2135		/*
2136		 *	Don't drop CE or RL error frame although RXOK is off
2137		 */
2138		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2139			if (velocity_receive_frame(vptr, rd_curr) < 0)
2140				stats->rx_dropped++;
2141		} else {
2142			if (rd->rdesc0.RSR & RSR_CRC)
2143				stats->rx_crc_errors++;
2144			if (rd->rdesc0.RSR & RSR_FAE)
2145				stats->rx_frame_errors++;
2146
2147			stats->rx_dropped++;
2148		}
2149
2150		rd->size |= RX_INTEN;
2151
2152		rd_curr++;
2153		if (rd_curr >= vptr->options.numrx)
2154			rd_curr = 0;
2155		works++;
2156	}
2157
2158	vptr->rx.curr = rd_curr;
2159
2160	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2161		velocity_give_many_rx_descs(vptr);
2162
2163	VAR_USED(stats);
2164	return works;
2165}
2166
2167static int velocity_poll(struct napi_struct *napi, int budget)
2168{
2169	struct velocity_info *vptr = container_of(napi,
2170			struct velocity_info, napi);
2171	unsigned int rx_done;
2172	unsigned long flags;
2173
2174	/*
2175	 * Do rx and tx twice for performance (taken from the VIA
2176	 * out-of-tree driver).
2177	 */
2178	rx_done = velocity_rx_srv(vptr, budget);
2179	spin_lock_irqsave(&vptr->lock, flags);
2180	velocity_tx_srv(vptr);
2181	/* If budget not fully consumed, exit the polling mode */
2182	if (rx_done < budget) {
2183		napi_complete_done(napi, rx_done);
2184		mac_enable_int(vptr->mac_regs);
2185	}
2186	spin_unlock_irqrestore(&vptr->lock, flags);
2187
2188	return rx_done;
2189}
2190
2191/**
2192 *	velocity_intr		-	interrupt callback
2193 *	@irq: interrupt number
2194 *	@dev_instance: interrupting device
2195 *
2196 *	Called whenever an interrupt is generated by the velocity
2197 *	adapter IRQ line. We may not be the source of the interrupt
2198 *	and need to identify initially if we are, and if not exit as
2199 *	efficiently as possible.
2200 */
2201static irqreturn_t velocity_intr(int irq, void *dev_instance)
2202{
2203	struct net_device *dev = dev_instance;
2204	struct velocity_info *vptr = netdev_priv(dev);
2205	u32 isr_status;
2206
2207	spin_lock(&vptr->lock);
2208	isr_status = mac_read_isr(vptr->mac_regs);
2209
2210	/* Not us ? */
2211	if (isr_status == 0) {
2212		spin_unlock(&vptr->lock);
2213		return IRQ_NONE;
2214	}
2215
2216	/* Ack the interrupt */
2217	mac_write_isr(vptr->mac_regs, isr_status);
2218
2219	if (likely(napi_schedule_prep(&vptr->napi))) {
2220		mac_disable_int(vptr->mac_regs);
2221		__napi_schedule(&vptr->napi);
2222	}
2223
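	/*
	 * Bits other than the normal RX/TX completion interrupts indicate
	 * error or link events; handle those here rather than deferring
	 * them to the NAPI poll path.
	 */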
2224	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2225		velocity_error(vptr, isr_status);
2226
2227	spin_unlock(&vptr->lock);
2228
2229	return IRQ_HANDLED;
2230}
2231
2232/**
2233 *	velocity_open		-	interface activation callback
2234 *	@dev: network layer device to open
2235 *
2236 *	Called when the network layer brings the interface up. Returns
2237 *	a negative posix error code on failure, or zero on success.
2238 *
2239 *	All the ring allocation and set up is done on open for this
2240 *	adapter to minimise memory usage when inactive
2241 */
2242static int velocity_open(struct net_device *dev)
2243{
2244	struct velocity_info *vptr = netdev_priv(dev);
2245	int ret;
2246
2247	ret = velocity_init_rings(vptr, dev->mtu);
2248	if (ret < 0)
2249		goto out;
2250
2251	/* Ensure chip is running */
2252	velocity_set_power_state(vptr, PCI_D0);
2253
2254	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2255
2256	ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2257			  dev->name, dev);
2258	if (ret < 0) {
2259		/* Power down the chip */
2260		velocity_set_power_state(vptr, PCI_D3hot);
2261		velocity_free_rings(vptr);
2262		goto out;
2263	}
2264
2265	velocity_give_many_rx_descs(vptr);
2266
2267	mac_enable_int(vptr->mac_regs);
2268	netif_start_queue(dev);
2269	napi_enable(&vptr->napi);
2270	vptr->flags |= VELOCITY_FLAGS_OPENED;
2271out:
2272	return ret;
2273}
2274
2275/**
2276 *	velocity_shutdown	-	shut down the chip
2277 *	@vptr: velocity to deactivate
2278 *
2279 *	Shuts down the internal operations of the velocity and
2280 *	disables interrupts, autopolling, transmit and receive
2281 */
2282static void velocity_shutdown(struct velocity_info *vptr)
2283{
2284	struct mac_regs __iomem *regs = vptr->mac_regs;
2285	mac_disable_int(regs);
2286	writel(CR0_STOP, &regs->CR0Set);
2287	writew(0xFFFF, &regs->TDCSRClr);
2288	writeb(0xFF, &regs->RDCSRClr);
2289	safe_disable_mii_autopoll(regs);
2290	mac_clear_isr(regs);
2291}
2292
2293/**
2294 *	velocity_change_mtu	-	MTU change callback
2295 *	@dev: network device
2296 *	@new_mtu: desired MTU
2297 *
2298 *	Handle requests from the networking layer for MTU change on
2299 *	this interface. It gets called on a change by the network layer.
2300 *	Return zero for success or negative posix error code.
2301 */
2302static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2303{
2304	struct velocity_info *vptr = netdev_priv(dev);
2305	int ret = 0;
2306
2307	if (!netif_running(dev)) {
2308		dev->mtu = new_mtu;
2309		goto out_0;
2310	}
2311
2312	if (dev->mtu != new_mtu) {
2313		struct velocity_info *tmp_vptr;
2314		unsigned long flags;
2315		struct rx_info rx;
2316		struct tx_info tx;
2317
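		/*
		 * Build a throwaway velocity_info so the new rings can be
		 * allocated before the old ones are torn down; the ring
		 * state is then swapped in under the lock.
		 */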
2318		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2319		if (!tmp_vptr) {
2320			ret = -ENOMEM;
2321			goto out_0;
2322		}
2323
2324		tmp_vptr->netdev = dev;
2325		tmp_vptr->pdev = vptr->pdev;
2326		tmp_vptr->dev = vptr->dev;
2327		tmp_vptr->options = vptr->options;
2328		tmp_vptr->tx.numq = vptr->tx.numq;
2329
2330		ret = velocity_init_rings(tmp_vptr, new_mtu);
2331		if (ret < 0)
2332			goto out_free_tmp_vptr_1;
2333
2334		napi_disable(&vptr->napi);
2335
2336		spin_lock_irqsave(&vptr->lock, flags);
2337
2338		netif_stop_queue(dev);
2339		velocity_shutdown(vptr);
2340
2341		rx = vptr->rx;
2342		tx = vptr->tx;
2343
2344		vptr->rx = tmp_vptr->rx;
2345		vptr->tx = tmp_vptr->tx;
2346
2347		tmp_vptr->rx = rx;
2348		tmp_vptr->tx = tx;
2349
2350		dev->mtu = new_mtu;
2351
2352		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2353
2354		velocity_give_many_rx_descs(vptr);
2355
2356		napi_enable(&vptr->napi);
2357
2358		mac_enable_int(vptr->mac_regs);
2359		netif_start_queue(dev);
2360
2361		spin_unlock_irqrestore(&vptr->lock, flags);
2362
2363		velocity_free_rings(tmp_vptr);
2364
2365out_free_tmp_vptr_1:
2366		kfree(tmp_vptr);
2367	}
2368out_0:
2369	return ret;
2370}
2371
2372#ifdef CONFIG_NET_POLL_CONTROLLER
2373/**
2374 *  velocity_poll_controller		-	Velocity Poll controller function
2375 *  @dev: network device
2376 *
2378 *  Used by NETCONSOLE and other diagnostic tools to allow network I/O
2379 *  with interrupts disabled.
2380 */
2381static void velocity_poll_controller(struct net_device *dev)
2382{
2383	disable_irq(dev->irq);
2384	velocity_intr(dev->irq, dev);
2385	enable_irq(dev->irq);
2386}
2387#endif
2388
2389/**
2390 *	velocity_mii_ioctl		-	MII ioctl handler
2391 *	@dev: network device
2392 *	@ifr: the ifreq block for the ioctl
2393 *	@cmd: the command
2394 *
2395 *	Process MII requests made via ioctl from the network layer. These
2396 *	are used by tools like kudzu to interrogate the link state of the
2397 *	hardware
2398 */
2399static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2400{
2401	struct velocity_info *vptr = netdev_priv(dev);
2402	struct mac_regs __iomem *regs = vptr->mac_regs;
2403	unsigned long flags;
2404	struct mii_ioctl_data *miidata = if_mii(ifr);
2405	int err;
2406
2407	switch (cmd) {
2408	case SIOCGMIIPHY:
2409		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2410		break;
2411	case SIOCGMIIREG:
2412		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2413			return -ETIMEDOUT;
2414		break;
2415	case SIOCSMIIREG:
2416		spin_lock_irqsave(&vptr->lock, flags);
2417		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2418		spin_unlock_irqrestore(&vptr->lock, flags);
2419		check_connection_type(vptr->mac_regs);
2420		if (err)
2421			return err;
2422		break;
2423	default:
2424		return -EOPNOTSUPP;
2425	}
2426	return 0;
2427}
2428
2429/**
2430 *	velocity_ioctl		-	ioctl entry point
2431 *	@dev: network device
2432 *	@rq: interface request ioctl
2433 *	@cmd: command code
2434 *
2435 *	Called when the user issues an ioctl request to the network
2436 *	device in question. The velocity interface supports MII.
2437 */
2438static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2439{
2440	struct velocity_info *vptr = netdev_priv(dev);
2441	int ret;
2442
2443	/* If we are asked for information and the device is power
2444	   saving then we need to bring the device back up to talk to it */
2445
2446	if (!netif_running(dev))
2447		velocity_set_power_state(vptr, PCI_D0);
2448
2449	switch (cmd) {
2450	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
2451	case SIOCGMIIREG:	/* Read MII PHY register. */
2452	case SIOCSMIIREG:	/* Write to MII PHY register. */
2453		ret = velocity_mii_ioctl(dev, rq, cmd);
2454		break;
2455
2456	default:
2457		ret = -EOPNOTSUPP;
2458	}
2459	if (!netif_running(dev))
2460		velocity_set_power_state(vptr, PCI_D3hot);
2461
2462
2463	return ret;
2464}
2465
2466/**
2467 *	velocity_get_stats	-	statistics callback
2468 *	@dev: network device
2469 *
2470 *	Callback from the network layer to allow driver statistics
2471 *	to be resynchronized with hardware collected state. In the
2472 *	case of the velocity we need to pull the MIB counters from
2473 *	the hardware into the software counters before letting the network
2474 *	layer display them.
2475 */
2476static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2477{
2478	struct velocity_info *vptr = netdev_priv(dev);
2479
2480	/* If the hardware is down, don't touch MII */
2481	if (!netif_running(dev))
2482		return &dev->stats;
2483
2484	spin_lock_irq(&vptr->lock);
2485	velocity_update_hw_mibs(vptr);
2486	spin_unlock_irq(&vptr->lock);
2487
2488	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2489	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2490	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2491
2492//  unsigned long   rx_dropped;     /* no space in linux buffers    */
2493	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2494	/* detailed rx_errors: */
2495//  unsigned long   rx_length_errors;
2496//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2497	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2498//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2499//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2500//  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2501
2502	/* detailed tx_errors */
2503//  unsigned long   tx_fifo_errors;
2504
2505	return &dev->stats;
2506}
2507
2508/**
2509 *	velocity_close		-	close adapter callback
2510 *	@dev: network device
2511 *
2512 *	Callback from the network layer when the velocity is being
2513 *	deactivated by the network layer
2514 */
2515static int velocity_close(struct net_device *dev)
2516{
2517	struct velocity_info *vptr = netdev_priv(dev);
2518
2519	napi_disable(&vptr->napi);
2520	netif_stop_queue(dev);
2521	velocity_shutdown(vptr);
2522
2523	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2524		velocity_get_ip(vptr);
2525
2526	free_irq(dev->irq, dev);
2527
2528	velocity_free_rings(vptr);
2529
2530	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2531	return 0;
2532}
2533
2534/**
2535 *	velocity_xmit		-	transmit packet callback
2536 *	@skb: buffer to transmit
2537 *	@dev: network device
2538 *
2539 *	Called by the network layer to request that a packet be queued to
2540 *	the velocity. Returns zero on success.
2541 */
2542static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2543				 struct net_device *dev)
2544{
2545	struct velocity_info *vptr = netdev_priv(dev);
2546	int qnum = 0;
2547	struct tx_desc *td_ptr;
2548	struct velocity_td_info *tdinfo;
2549	unsigned long flags;
2550	int pktlen;
2551	int index, prev;
2552	int i = 0;
2553
2554	if (skb_padto(skb, ETH_ZLEN))
2555		goto out;
2556
2557	/* The hardware can handle at most 7 memory segments, so merge
2558	 * the skb if there are more */
2559	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2560		dev_kfree_skb_any(skb);
2561		return NETDEV_TX_OK;
2562	}
2563
2564	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2565			max_t(unsigned int, skb->len, ETH_ZLEN) :
2566				skb_headlen(skb);
2567
2568	spin_lock_irqsave(&vptr->lock, flags);
2569
2570	index = vptr->tx.curr[qnum];
2571	td_ptr = &(vptr->tx.rings[qnum][index]);
2572	tdinfo = &(vptr->tx.infos[qnum][index]);
2573
2574	td_ptr->tdesc1.TCR = TCR0_TIC;
2575	td_ptr->td_buf[0].size &= ~TD_QUEUE;
2576
2577	/*
2578	 *	Map the linear network buffer into PCI space and
2579	 *	add it to the transmit ring.
2580	 */
2581	tdinfo->skb = skb;
2582	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2583								DMA_TO_DEVICE);
2584	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2585	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2586	td_ptr->td_buf[0].pa_high = 0;
2587	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2588
2589	/* Handle fragments */
2590	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2591		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2592
2593		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2594							  frag, 0,
2595							  skb_frag_size(frag),
2596							  DMA_TO_DEVICE);
2597
2598		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2599		td_ptr->td_buf[i + 1].pa_high = 0;
2600		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2601	}
2602	tdinfo->nskb_dma = i + 1;
2603
2604	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2605
2606	if (skb_vlan_tag_present(skb)) {
2607		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2608		td_ptr->tdesc1.TCR |= TCR0_VETAG;
2609	}
2610
2611	/*
2612	 *	Handle hardware checksum
2613	 */
2614	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2615		const struct iphdr *ip = ip_hdr(skb);
2616		if (ip->protocol == IPPROTO_TCP)
2617			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2618		else if (ip->protocol == IPPROTO_UDP)
2619			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2620		td_ptr->tdesc1.TCR |= TCR0_IPCK;
2621	}
2622
2623	prev = index - 1;
2624	if (prev < 0)
2625		prev = vptr->options.numtx - 1;
2626	td_ptr->tdesc0.len |= OWNED_BY_NIC;
2627	vptr->tx.used[qnum]++;
2628	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2629
2630	if (AVAIL_TD(vptr, qnum) < 1)
2631		netif_stop_queue(dev);
2632
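	/* Mark the previous descriptor as having more frames queued behind it, then kick the transmit queue */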
2633	td_ptr = &(vptr->tx.rings[qnum][prev]);
2634	td_ptr->td_buf[0].size |= TD_QUEUE;
2635	mac_tx_queue_wake(vptr->mac_regs, qnum);
2636
2637	spin_unlock_irqrestore(&vptr->lock, flags);
2638out:
2639	return NETDEV_TX_OK;
2640}
2641
2642static const struct net_device_ops velocity_netdev_ops = {
2643	.ndo_open		= velocity_open,
2644	.ndo_stop		= velocity_close,
2645	.ndo_start_xmit		= velocity_xmit,
2646	.ndo_get_stats		= velocity_get_stats,
2647	.ndo_validate_addr	= eth_validate_addr,
2648	.ndo_set_mac_address	= eth_mac_addr,
2649	.ndo_set_rx_mode	= velocity_set_multi,
2650	.ndo_change_mtu		= velocity_change_mtu,
2651	.ndo_do_ioctl		= velocity_ioctl,
2652	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
2653	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
2654#ifdef CONFIG_NET_POLL_CONTROLLER
2655	.ndo_poll_controller = velocity_poll_controller,
2656#endif
2657};
2658
2659/**
2660 *	velocity_init_info	-	init private data
2662 *	@vptr: Velocity info
2663 *	@info: Board type
2664 *
2665 *	Set up the initial velocity_info struct for the device that has been
2666 *	discovered.
2667 */
2668static void velocity_init_info(struct velocity_info *vptr,
2669				const struct velocity_info_tbl *info)
2670{
2671	vptr->chip_id = info->chip_id;
2672	vptr->tx.numq = info->txqueue;
2673	vptr->multicast_limit = MCAM_SIZE;
2674	spin_lock_init(&vptr->lock);
2675}
2676
2677/**
2678 *	velocity_get_pci_info	-	retrieve PCI info for device
2679 *	@vptr: velocity device
2681 *
2682 *	Retrieve the PCI configuration space data that interests us from
2683 *	the kernel PCI layer
2684 */
2685static int velocity_get_pci_info(struct velocity_info *vptr)
2686{
2687	struct pci_dev *pdev = vptr->pdev;
2688
2689	pci_set_master(pdev);
2690
2691	vptr->ioaddr = pci_resource_start(pdev, 0);
2692	vptr->memaddr = pci_resource_start(pdev, 1);
2693
2694	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2695		dev_err(&pdev->dev,
2696			   "region #0 is not an I/O resource, aborting.\n");
2697		return -EINVAL;
2698	}
2699
2700	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2701		dev_err(&pdev->dev,
2702			   "region #1 is an I/O resource, aborting.\n");
2703		return -EINVAL;
2704	}
2705
2706	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2707		dev_err(&pdev->dev, "region #1 is too small.\n");
2708		return -EINVAL;
2709	}
2710
2711	return 0;
2712}
2713
2714/**
2715 *	velocity_get_platform_info - retrieve platform info for device
2716 *	@vptr: velocity device
2718 *
2719 *	Retrieve the Platform configuration data that interests us
2720 */
2721static int velocity_get_platform_info(struct velocity_info *vptr)
2722{
2723	struct resource res;
2724	int ret;
2725
2726	if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2727		vptr->no_eeprom = 1;
2728
2729	ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2730	if (ret) {
2731		dev_err(vptr->dev, "unable to find memory address\n");
2732		return ret;
2733	}
2734
2735	vptr->memaddr = res.start;
2736
2737	if (resource_size(&res) < VELOCITY_IO_SIZE) {
2738		dev_err(vptr->dev, "memory region is too small.\n");
2739		return -EINVAL;
2740	}
2741
2742	return 0;
2743}
2744
2745/**
2746 *	velocity_print_info	-	per driver data
2747 *	@vptr: velocity
2748 *
2749 *	Print per-device information as the driver discovers
2750 *	Velocity hardware.
2751 */
2752static void velocity_print_info(struct velocity_info *vptr)
2753{
2754	netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
2755		    get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
2756}
2757
2758static u32 velocity_get_link(struct net_device *dev)
2759{
2760	struct velocity_info *vptr = netdev_priv(dev);
2761	struct mac_regs __iomem *regs = vptr->mac_regs;
2762	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2763}
2764
2765/**
2766 *	velocity_probe - set up discovered velocity device
2767 *	@dev: device being probed
2768 *	@irq: interrupt number assigned to the adapter
 *	@info: adapter information table entry that matched
2769 *	@bustype: bus that device is connected to
2770 *
2771 *	Configure a discovered adapter from scratch. Return a negative
2772 *	errno error code on failure paths.
2773 */
2774static int velocity_probe(struct device *dev, int irq,
2775			   const struct velocity_info_tbl *info,
2776			   enum velocity_bus_type bustype)
2777{
2778	struct net_device *netdev;
2779	int i;
2780	struct velocity_info *vptr;
2781	struct mac_regs __iomem *regs;
2782	int ret = -ENOMEM;
2783
2784	/* FIXME: this driver, like almost all other ethernet drivers,
2785	 * can support more than MAX_UNITS.
2786	 */
2787	if (velocity_nics >= MAX_UNITS) {
2788		dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2789		return -ENODEV;
2790	}
2791
2792	netdev = alloc_etherdev(sizeof(struct velocity_info));
2793	if (!netdev)
2794		goto out;
2795
2796	/* Chain it all together */
2797
2798	SET_NETDEV_DEV(netdev, dev);
2799	vptr = netdev_priv(netdev);
2800
2801	pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2802	pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2803	pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
2804
2805	netdev->irq = irq;
2806	vptr->netdev = netdev;
2807	vptr->dev = dev;
2808
2809	velocity_init_info(vptr, info);
2810
2811	if (bustype == BUS_PCI) {
2812		vptr->pdev = to_pci_dev(dev);
2813
2814		ret = velocity_get_pci_info(vptr);
2815		if (ret < 0)
2816			goto err_free_dev;
2817	} else {
2818		vptr->pdev = NULL;
2819		ret = velocity_get_platform_info(vptr);
2820		if (ret < 0)
2821			goto err_free_dev;
2822	}
2823
2824	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2825	if (regs == NULL) {
2826		ret = -EIO;
2827		goto err_free_dev;
2828	}
2829
2830	vptr->mac_regs = regs;
2831	vptr->rev_id = readb(&regs->rev_id);
2832
2833	mac_wol_reset(regs);
2834
2835	for (i = 0; i < 6; i++)
2836		netdev->dev_addr[i] = readb(&regs->PAR[i]);
2837
2838
2839	velocity_get_options(&vptr->options, velocity_nics);
2840
2841	/*
2842	 *	Mask out the options that cannot be set on this chip
2843	 */
2844
2845	vptr->options.flags &= info->flags;
2846
2847	/*
2848	 *	Enable the chip specified capabilities
2849	 */
2850
2851	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2852
2853	vptr->wol_opts = vptr->options.wol_opts;
2854	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2855
2856	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2857
2858	netdev->netdev_ops = &velocity_netdev_ops;
2859	netdev->ethtool_ops = &velocity_ethtool_ops;
2860	netif_napi_add(netdev, &vptr->napi, velocity_poll,
2861							VELOCITY_NAPI_WEIGHT);
2862
2863	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2864			   NETIF_F_HW_VLAN_CTAG_TX;
2865	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2866			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2867			NETIF_F_IP_CSUM;
2868
2869	/* MTU range: 64 - 9000 */
2870	netdev->min_mtu = VELOCITY_MIN_MTU;
2871	netdev->max_mtu = VELOCITY_MAX_MTU;
2872
2873	ret = register_netdev(netdev);
2874	if (ret < 0)
2875		goto err_iounmap;
2876
2877	if (!velocity_get_link(netdev)) {
2878		netif_carrier_off(netdev);
2879		vptr->mii_status |= VELOCITY_LINK_FAIL;
2880	}
2881
2882	velocity_print_info(vptr);
2883	dev_set_drvdata(vptr->dev, netdev);
2884
2885	/* and leave the chip powered down */
2886
2887	velocity_set_power_state(vptr, PCI_D3hot);
2888	velocity_nics++;
2889out:
2890	return ret;
2891
2892err_iounmap:
2893	netif_napi_del(&vptr->napi);
2894	iounmap(regs);
2895err_free_dev:
2896	free_netdev(netdev);
2897	goto out;
2898}
2899
2900/**
2901 *	velocity_remove	- device unplug
2902 *	@dev: device being removed
2903 *
2904 *	Device unload callback. Called on an unplug or on module
2905 *	unload for each active device that is present. Disconnects
2906 *	the device from the network layer and frees all the resources
2907 */
2908static int velocity_remove(struct device *dev)
2909{
2910	struct net_device *netdev = dev_get_drvdata(dev);
2911	struct velocity_info *vptr = netdev_priv(netdev);
2912
2913	unregister_netdev(netdev);
2914	netif_napi_del(&vptr->napi);
2915	iounmap(vptr->mac_regs);
2916	free_netdev(netdev);
2917	velocity_nics--;
2918
2919	return 0;
2920}
2921
2922static int velocity_pci_probe(struct pci_dev *pdev,
2923			       const struct pci_device_id *ent)
2924{
2925	const struct velocity_info_tbl *info =
2926					&chip_info_table[ent->driver_data];
2927	int ret;
2928
2929	ret = pci_enable_device(pdev);
2930	if (ret < 0)
2931		return ret;
2932
2933	ret = pci_request_regions(pdev, VELOCITY_NAME);
2934	if (ret < 0) {
2935		dev_err(&pdev->dev, "No PCI resources.\n");
2936		goto fail1;
2937	}
2938
2939	ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2940	if (ret == 0)
2941		return 0;
2942
2943	pci_release_regions(pdev);
2944fail1:
2945	pci_disable_device(pdev);
2946	return ret;
2947}
2948
2949static void velocity_pci_remove(struct pci_dev *pdev)
2950{
2951	velocity_remove(&pdev->dev);
2952
2953	pci_release_regions(pdev);
2954	pci_disable_device(pdev);
2955}
2956
2957static int velocity_platform_probe(struct platform_device *pdev)
2958{
2959	const struct of_device_id *of_id;
2960	const struct velocity_info_tbl *info;
2961	int irq;
2962
2963	of_id = of_match_device(velocity_of_ids, &pdev->dev);
2964	if (!of_id)
2965		return -EINVAL;
2966	info = of_id->data;
2967
2968	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2969	if (!irq)
2970		return -EINVAL;
2971
2972	return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2973}
2974
2975static int velocity_platform_remove(struct platform_device *pdev)
2976{
2977	velocity_remove(&pdev->dev);
2978
2979	return 0;
2980}
2981
2982#ifdef CONFIG_PM_SLEEP
2983/**
2984 *	wol_calc_crc		-	WOL CRC
 *	@size: number of mask bytes to process (each mask byte covers 8 pattern bytes)
2985 *	@pattern: data pattern
2986 *	@mask_pattern: mask
2987 *
2988 *	Compute the wake on lan crc hashes for the packet header
2989 *	we are interested in.
2990 */
2991static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2992{
2993	u16 crc = 0xFFFF;
2994	u8 mask;
2995	int i, j;
2996
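	/* Run CRC-CCITT over every pattern byte whose corresponding mask bit is set */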
2997	for (i = 0; i < size; i++) {
2998		mask = mask_pattern[i];
2999
3000		/* Skip this loop if the mask equals to zero */
3001		if (mask == 0x00)
3002			continue;
3003
3004		for (j = 0; j < 8; j++) {
3005			if ((mask & 0x01) == 0) {
3006				mask >>= 1;
3007				continue;
3008			}
3009			mask >>= 1;
3010			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
3011		}
3012	}
3013	/*	Finally, invert the result once to get the correct data */
3014	crc = ~crc;
3015	return bitrev32(crc) >> 16;
3016}
3017
3018/**
3019 *	velocity_set_wol	-	set up for wake on lan
3020 *	@vptr: velocity to set WOL status on
3021 *
3022 *	Set a card up for wake on lan either by unicast or by
3023 *	ARP packet.
3024 *
3025 *	FIXME: check static buffer is safe here
3026 */
3027static int velocity_set_wol(struct velocity_info *vptr)
3028{
3029	struct mac_regs __iomem *regs = vptr->mac_regs;
3030	enum speed_opt spd_dpx = vptr->options.spd_dpx;
3031	static u8 buf[256];
3032	int i;
3033
3034	static u32 mask_pattern[2][4] = {
3035		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
3036		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
3037	};
3038
3039	writew(0xFFFF, &regs->WOLCRClr);
3040	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3041	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3042
3043	/*
3044	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3045	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3046	 */
3047
3048	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3049		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3050
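	/*
	 * For ARP wake-up, build a template ARP request aimed at our IP
	 * address and program its CRC into the pattern match engine.
	 */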
3051	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3052		struct arp_packet *arp = (struct arp_packet *) buf;
3053		u16 crc;
3054		memset(buf, 0, sizeof(struct arp_packet) + 7);
3055
3056		for (i = 0; i < 4; i++)
3057			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3058
3059		arp->type = htons(ETH_P_ARP);
3060		arp->ar_op = htons(1);
3061
3062		memcpy(arp->ar_tip, vptr->ip_addr, 4);
3063
3064		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3065				(u8 *) & mask_pattern[0][0]);
3066
3067		writew(crc, &regs->PatternCRC[0]);
3068		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3069	}
3070
3071	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3072	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3073
3074	writew(0x0FFF, &regs->WOLSRClr);
3075
3076	if (spd_dpx == SPD_DPX_1000_FULL)
3077		goto mac_done;
3078
3079	if (spd_dpx != SPD_DPX_AUTO)
3080		goto advertise_done;
3081
3082	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3083		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3084			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3085
3086		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3087	}
3088
3089	if (vptr->mii_status & VELOCITY_SPEED_1000)
3090		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3091
3092advertise_done:
3093	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3094
3095	{
3096		u8 GCR;
3097		GCR = readb(&regs->CHIPGCR);
3098		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3099		writeb(GCR, &regs->CHIPGCR);
3100	}
3101
3102mac_done:
3103	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3104	/* Turn on SWPTAG just before entering power mode */
3105	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3106	/* Go to bed ..... */
3107	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3108
3109	return 0;
3110}
3111
3112/**
3113 *	velocity_save_context	-	save registers
3114 *	@vptr: velocity
3115 *	@context: buffer for stored context
3116 *
3117 *	Retrieve the current configuration from the velocity hardware
3118 *	and stash it in the context structure, for use by the context
3119 *	restore functions. This allows us to save things we need across
3120 *	power down states
3121 */
3122static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3123{
3124	struct mac_regs __iomem *regs = vptr->mac_regs;
3125	u16 i;
3126	u8 __iomem *ptr = (u8 __iomem *)regs;
3127
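	/* Snapshot the register windows we will need after the power transition, in 32 bit chunks */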
3128	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3129		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3130
3131	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3132		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3133
3134	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3135		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3136
3137}
3138
3139static int velocity_suspend(struct device *dev)
3140{
3141	struct net_device *netdev = dev_get_drvdata(dev);
3142	struct velocity_info *vptr = netdev_priv(netdev);
3143	unsigned long flags;
3144
3145	if (!netif_running(vptr->netdev))
3146		return 0;
3147
3148	netif_device_detach(vptr->netdev);
3149
3150	spin_lock_irqsave(&vptr->lock, flags);
3151	if (vptr->pdev)
3152		pci_save_state(vptr->pdev);
3153
3154	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3155		velocity_get_ip(vptr);
3156		velocity_save_context(vptr, &vptr->context);
3157		velocity_shutdown(vptr);
3158		velocity_set_wol(vptr);
3159		if (vptr->pdev)
3160			pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3161		velocity_set_power_state(vptr, PCI_D3hot);
3162	} else {
3163		velocity_save_context(vptr, &vptr->context);
3164		velocity_shutdown(vptr);
3165		if (vptr->pdev)
3166			pci_disable_device(vptr->pdev);
3167		velocity_set_power_state(vptr, PCI_D3hot);
3168	}
3169
3170	spin_unlock_irqrestore(&vptr->lock, flags);
3171	return 0;
3172}
3173
3174/**
3175 *	velocity_restore_context	-	restore registers
3176 *	@vptr: velocity
3177 *	@context: buffer for stored context
3178 *
3179 *	Reload the register configuration from the velocity context
3180 *	created by velocity_save_context.
3181 */
3182static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3183{
3184	struct mac_regs __iomem *regs = vptr->mac_regs;
3185	int i;
3186	u8 __iomem *ptr = (u8 __iomem *)regs;
3187
3188	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3189		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3190
3191	/* Just skip cr0 */
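	/* The CR set/clear register pairs are 4 bytes apart: write the saved value to the set register and its complement to the matching clear register */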
3192	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3193		/* Clear */
3194		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3195		/* Set */
3196		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3197	}
3198
3199	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3200		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3201
3202	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3203		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3204
3205	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3206		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3207}
3208
3209static int velocity_resume(struct device *dev)
3210{
3211	struct net_device *netdev = dev_get_drvdata(dev);
3212	struct velocity_info *vptr = netdev_priv(netdev);
3213	unsigned long flags;
3214	int i;
3215
3216	if (!netif_running(vptr->netdev))
3217		return 0;
3218
3219	velocity_set_power_state(vptr, PCI_D0);
3220
3221	if (vptr->pdev) {
3222		pci_enable_wake(vptr->pdev, PCI_D0, 0);
3223		pci_restore_state(vptr->pdev);
3224	}
3225
3226	mac_wol_reset(vptr->mac_regs);
3227
3228	spin_lock_irqsave(&vptr->lock, flags);
3229	velocity_restore_context(vptr, &vptr->context);
3230	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3231	mac_disable_int(vptr->mac_regs);
3232
3233	velocity_tx_srv(vptr);
3234
3235	for (i = 0; i < vptr->tx.numq; i++) {
3236		if (vptr->tx.used[i])
3237			mac_tx_queue_wake(vptr->mac_regs, i);
3238	}
3239
3240	mac_enable_int(vptr->mac_regs);
3241	spin_unlock_irqrestore(&vptr->lock, flags);
3242	netif_device_attach(vptr->netdev);
3243
3244	return 0;
3245}
3246#endif	/* CONFIG_PM_SLEEP */
3247
3248static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3249
3250/*
3251 *	Definition for our device driver. The PCI layer interface
3252 *	uses this to handle all our card discover and plugging
3253 */
3254static struct pci_driver velocity_pci_driver = {
3255	.name		= VELOCITY_NAME,
3256	.id_table	= velocity_pci_id_table,
3257	.probe		= velocity_pci_probe,
3258	.remove		= velocity_pci_remove,
3259	.driver = {
3260		.pm = &velocity_pm_ops,
3261	},
3262};
3263
3264static struct platform_driver velocity_platform_driver = {
3265	.probe		= velocity_platform_probe,
3266	.remove		= velocity_platform_remove,
3267	.driver = {
3268		.name = "via-velocity",
3269		.of_match_table = velocity_of_ids,
3270		.pm = &velocity_pm_ops,
3271	},
3272};
3273
3274/**
3275 *	velocity_ethtool_up	-	pre hook for ethtool
3276 *	@dev: network device
3277 *
3278 *	Called before an ethtool operation. We need to make sure the
3279 *	chip is out of D3 state before we poke at it. In case of ethtool
3280 *	ops nesting, only wake the device up in the outermost block.
3281 */
3282static int velocity_ethtool_up(struct net_device *dev)
3283{
3284	struct velocity_info *vptr = netdev_priv(dev);
3285
3286	if (vptr->ethtool_ops_nesting == U32_MAX)
3287		return -EBUSY;
3288	if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
3289		velocity_set_power_state(vptr, PCI_D0);
3290	return 0;
3291}
3292
3293/**
3294 *	velocity_ethtool_down	-	post hook for ethtool
3295 *	@dev: network device
3296 *
3297 *	Called after an ethtool operation. Restore the chip back to D3
3298 *	state if it isn't running. In case of ethtool ops nesting, only
3299 *	put the device to sleep in the outermost block.
3300 */
3301static void velocity_ethtool_down(struct net_device *dev)
3302{
3303	struct velocity_info *vptr = netdev_priv(dev);
3304
3305	if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
3306		velocity_set_power_state(vptr, PCI_D3hot);
3307}
3308
3309static int velocity_get_link_ksettings(struct net_device *dev,
3310				       struct ethtool_link_ksettings *cmd)
3311{
3312	struct velocity_info *vptr = netdev_priv(dev);
3313	struct mac_regs __iomem *regs = vptr->mac_regs;
3314	u32 status;
3315	u32 supported, advertising;
3316
3317	status = check_connection_type(vptr->mac_regs);
3318
3319	supported = SUPPORTED_TP |
3320			SUPPORTED_Autoneg |
3321			SUPPORTED_10baseT_Half |
3322			SUPPORTED_10baseT_Full |
3323			SUPPORTED_100baseT_Half |
3324			SUPPORTED_100baseT_Full |
3325			SUPPORTED_1000baseT_Half |
3326			SUPPORTED_1000baseT_Full;
3327
3328	advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3329	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3330		advertising |=
3331			ADVERTISED_10baseT_Half |
3332			ADVERTISED_10baseT_Full |
3333			ADVERTISED_100baseT_Half |
3334			ADVERTISED_100baseT_Full |
3335			ADVERTISED_1000baseT_Half |
3336			ADVERTISED_1000baseT_Full;
3337	} else {
3338		switch (vptr->options.spd_dpx) {
3339		case SPD_DPX_1000_FULL:
3340			advertising |= ADVERTISED_1000baseT_Full;
3341			break;
3342		case SPD_DPX_100_HALF:
3343			advertising |= ADVERTISED_100baseT_Half;
3344			break;
3345		case SPD_DPX_100_FULL:
3346			advertising |= ADVERTISED_100baseT_Full;
3347			break;
3348		case SPD_DPX_10_HALF:
3349			advertising |= ADVERTISED_10baseT_Half;
3350			break;
3351		case SPD_DPX_10_FULL:
3352			advertising |= ADVERTISED_10baseT_Full;
3353			break;
3354		default:
3355			break;
3356		}
3357	}
3358
3359	if (status & VELOCITY_SPEED_1000)
3360		cmd->base.speed = SPEED_1000;
3361	else if (status & VELOCITY_SPEED_100)
3362		cmd->base.speed = SPEED_100;
3363	else
3364		cmd->base.speed = SPEED_10;
3365
3366	cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3367		AUTONEG_ENABLE : AUTONEG_DISABLE;
3368	cmd->base.port = PORT_TP;
3369	cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
3370
3371	if (status & VELOCITY_DUPLEX_FULL)
3372		cmd->base.duplex = DUPLEX_FULL;
3373	else
3374		cmd->base.duplex = DUPLEX_HALF;
3375
3376	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3377						supported);
3378	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3379						advertising);
3380
3381	return 0;
3382}
3383
3384static int velocity_set_link_ksettings(struct net_device *dev,
3385				       const struct ethtool_link_ksettings *cmd)
3386{
3387	struct velocity_info *vptr = netdev_priv(dev);
3388	u32 speed = cmd->base.speed;
3389	u32 curr_status;
3390	u32 new_status = 0;
3391	int ret = 0;
3392
3393	curr_status = check_connection_type(vptr->mac_regs);
3394	curr_status &= (~VELOCITY_LINK_FAIL);
3395
3396	new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3397	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3398	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3399	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3400	new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3401		       VELOCITY_DUPLEX_FULL : 0);
3402
3403	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3404	    (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3405		ret = -EINVAL;
3406	} else {
3407		enum speed_opt spd_dpx;
3408
3409		if (new_status & VELOCITY_AUTONEG_ENABLE)
3410			spd_dpx = SPD_DPX_AUTO;
3411		else if ((new_status & VELOCITY_SPEED_1000) &&
3412			 (new_status & VELOCITY_DUPLEX_FULL)) {
3413			spd_dpx = SPD_DPX_1000_FULL;
3414		} else if (new_status & VELOCITY_SPEED_100)
3415			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3416				SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3417		else if (new_status & VELOCITY_SPEED_10)
3418			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3419				SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3420		else
3421			return -EOPNOTSUPP;
3422
3423		vptr->options.spd_dpx = spd_dpx;
3424
3425		velocity_set_media_mode(vptr, new_status);
3426	}
3427
3428	return ret;
3429}
3430
3431static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3432{
3433	struct velocity_info *vptr = netdev_priv(dev);
3434
3435	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3436	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3437	if (vptr->pdev)
3438		strlcpy(info->bus_info, pci_name(vptr->pdev),
3439						sizeof(info->bus_info));
3440	else
3441		strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3442}
3443
3444static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3445{
3446	struct velocity_info *vptr = netdev_priv(dev);
3447	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3448	wol->wolopts |= WAKE_MAGIC;
3449	/*
3450	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3451		   wol.wolopts|=WAKE_PHY;
3452			 */
3453	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3454		wol->wolopts |= WAKE_UCAST;
3455	if (vptr->wol_opts & VELOCITY_WOL_ARP)
3456		wol->wolopts |= WAKE_ARP;
3457	memcpy(&wol->sopass, vptr->wol_passwd, 6);
3458}
3459
3460static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3461{
3462	struct velocity_info *vptr = netdev_priv(dev);
3463
3464	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3465		return -EFAULT;
3466	vptr->wol_opts = VELOCITY_WOL_MAGIC;
3467
3468	/*
3469	   if (wol.wolopts & WAKE_PHY) {
3470	   vptr->wol_opts|=VELOCITY_WOL_PHY;
3471	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3472	   }
3473	 */
3474
3475	if (wol->wolopts & WAKE_MAGIC) {
3476		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3477		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3478	}
3479	if (wol->wolopts & WAKE_UCAST) {
3480		vptr->wol_opts |= VELOCITY_WOL_UCAST;
3481		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3482	}
3483	if (wol->wolopts & WAKE_ARP) {
3484		vptr->wol_opts |= VELOCITY_WOL_ARP;
3485		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3486	}
3487	memcpy(vptr->wol_passwd, wol->sopass, 6);
3488	return 0;
3489}
3490
3491static int get_pending_timer_val(int val)
3492{
3493	int mult_bits = val >> 6;
3494	int mult = 1;
3495
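	/* The top two bits of the register value select a multiplier for the low six bits */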
3496	switch (mult_bits)
3497	{
3498	case 1:
3499		mult = 4; break;
3500	case 2:
3501		mult = 16; break;
3502	case 3:
3503		mult = 64; break;
3504	case 0:
3505	default:
3506		break;
3507	}
3508
3509	return (val & 0x3f) * mult;
3510}
3511
3512static void set_pending_timer_val(int *val, u32 us)
3513{
3514	u8 mult = 0;
3515	u8 shift = 0;
3516
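	/* Pick the smallest multiplier (1, 4, 16 or 64) that lets the 6 bit field hold the requested delay */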
3517	if (us >= 0x3f) {
3518		mult = 1; /* mult with 4 */
3519		shift = 2;
3520	}
3521	if (us >= 0x3f * 4) {
3522		mult = 2; /* mult with 16 */
3523		shift = 4;
3524	}
3525	if (us >= 0x3f * 16) {
3526		mult = 3; /* mult with 64 */
3527		shift = 6;
3528	}
3529
3530	*val = (mult << 6) | ((us >> shift) & 0x3f);
3531}
3532
3533
3534static int velocity_get_coalesce(struct net_device *dev,
3535		struct ethtool_coalesce *ecmd)
3536{
3537	struct velocity_info *vptr = netdev_priv(dev);
3538
3539	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3540	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3541
3542	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3543	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3544
3545	return 0;
3546}
3547
3548static int velocity_set_coalesce(struct net_device *dev,
3549		struct ethtool_coalesce *ecmd)
3550{
3551	struct velocity_info *vptr = netdev_priv(dev);
3552	int max_us = 0x3f * 64;
3553	unsigned long flags;
3554
3555	/* The queue timers are 6 bit counts with a 2 bit multiplier, so 0x3f * 64 us is the maximum delay */
3556	if (ecmd->tx_coalesce_usecs > max_us)
3557		return -EINVAL;
3558	if (ecmd->rx_coalesce_usecs > max_us)
3559		return -EINVAL;
3560
3561	if (ecmd->tx_max_coalesced_frames > 0xff)
3562		return -EINVAL;
3563	if (ecmd->rx_max_coalesced_frames > 0xff)
3564		return -EINVAL;
3565
3566	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3567	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3568
3569	set_pending_timer_val(&vptr->options.rxqueue_timer,
3570			ecmd->rx_coalesce_usecs);
3571	set_pending_timer_val(&vptr->options.txqueue_timer,
3572			ecmd->tx_coalesce_usecs);
3573
3574	/* Setup the interrupt suppression and queue timers */
3575	spin_lock_irqsave(&vptr->lock, flags);
3576	mac_disable_int(vptr->mac_regs);
3577	setup_adaptive_interrupts(vptr);
3578	setup_queue_timers(vptr);
3579
3580	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3581	mac_clear_isr(vptr->mac_regs);
3582	mac_enable_int(vptr->mac_regs);
3583	spin_unlock_irqrestore(&vptr->lock, flags);
3584
3585	return 0;
3586}
3587
3588static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3589	"rx_all",
3590	"rx_ok",
3591	"tx_ok",
3592	"rx_error",
3593	"rx_runt_ok",
3594	"rx_runt_err",
3595	"rx_64",
3596	"tx_64",
3597	"rx_65_to_127",
3598	"tx_65_to_127",
3599	"rx_128_to_255",
3600	"tx_128_to_255",
3601	"rx_256_to_511",
3602	"tx_256_to_511",
3603	"rx_512_to_1023",
3604	"tx_512_to_1023",
3605	"rx_1024_to_1518",
3606	"tx_1024_to_1518",
3607	"tx_ether_collisions",
3608	"rx_crc_errors",
3609	"rx_jumbo",
3610	"tx_jumbo",
3611	"rx_mac_control_frames",
3612	"tx_mac_control_frames",
3613	"rx_frame_alignment_errors",
3614	"rx_long_ok",
3615	"rx_long_err",
3616	"tx_sqe_errors",
3617	"rx_no_buf",
3618	"rx_symbol_errors",
3619	"in_range_length_errors",
3620	"late_collisions"
3621};
3622
3623static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3624{
3625	switch (sset) {
3626	case ETH_SS_STATS:
3627		memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3628		break;
3629	}
3630}
3631
3632static int velocity_get_sset_count(struct net_device *dev, int sset)
3633{
3634	switch (sset) {
3635	case ETH_SS_STATS:
3636		return ARRAY_SIZE(velocity_gstrings);
3637	default:
3638		return -EOPNOTSUPP;
3639	}
3640}
3641
3642static void velocity_get_ethtool_stats(struct net_device *dev,
3643				       struct ethtool_stats *stats, u64 *data)
3644{
3645	if (netif_running(dev)) {
3646		struct velocity_info *vptr = netdev_priv(dev);
3647		u32 *p = vptr->mib_counter;
3648		int i;
3649
3650		spin_lock_irq(&vptr->lock);
3651		velocity_update_hw_mibs(vptr);
3652		spin_unlock_irq(&vptr->lock);
3653
3654		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3655			*data++ = *p++;
3656	}
3657}
3658
3659static const struct ethtool_ops velocity_ethtool_ops = {
3660	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3661				     ETHTOOL_COALESCE_MAX_FRAMES,
3662	.get_drvinfo		= velocity_get_drvinfo,
3663	.get_wol		= velocity_ethtool_get_wol,
3664	.set_wol		= velocity_ethtool_set_wol,
3665	.get_link		= velocity_get_link,
3666	.get_strings		= velocity_get_strings,
3667	.get_sset_count		= velocity_get_sset_count,
3668	.get_ethtool_stats	= velocity_get_ethtool_stats,
3669	.get_coalesce		= velocity_get_coalesce,
3670	.set_coalesce		= velocity_set_coalesce,
3671	.begin			= velocity_ethtool_up,
3672	.complete		= velocity_ethtool_down,
3673	.get_link_ksettings	= velocity_get_link_ksettings,
3674	.set_link_ksettings	= velocity_set_link_ksettings,
3675};
3676
3677#if defined(CONFIG_PM) && defined(CONFIG_INET)
3678static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3679{
3680	struct in_ifaddr *ifa = ptr;
3681	struct net_device *dev = ifa->ifa_dev->dev;
3682
3683	if (dev_net(dev) == &init_net &&
3684	    dev->netdev_ops == &velocity_netdev_ops)
3685		velocity_get_ip(netdev_priv(dev));
3686
3687	return NOTIFY_DONE;
3688}
3689
3690static struct notifier_block velocity_inetaddr_notifier = {
3691	.notifier_call	= velocity_netdev_event,
3692};
3693
3694static void velocity_register_notifier(void)
3695{
3696	register_inetaddr_notifier(&velocity_inetaddr_notifier);
3697}
3698
3699static void velocity_unregister_notifier(void)
3700{
3701	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3702}
3703
3704#else
3705
3706#define velocity_register_notifier()	do {} while (0)
3707#define velocity_unregister_notifier()	do {} while (0)
3708
3709#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
3710
3711/**
3712 *	velocity_init_module	-	load time function
3713 *
3714 *	Called when the velocity module is loaded. The PCI and
3715 *	platform drivers are registered with their respective layers,
3716 *	which in turn call the probe functions for each velocity
3717 *	adapter installed in the system.
3718 */
3719static int __init velocity_init_module(void)
3720{
3721	int ret_pci, ret_platform;
3722
3723	velocity_register_notifier();
3724
3725	ret_pci = pci_register_driver(&velocity_pci_driver);
3726	ret_platform = platform_driver_register(&velocity_platform_driver);
3727
3728	/* if both registrations failed, remove the notifier */
3729	if ((ret_pci < 0) && (ret_platform < 0)) {
3730		velocity_unregister_notifier();
3731		return ret_pci;
3732	}
3733
3734	return 0;
3735}
3736
3737/**
3738 *	velocity_cleanup_module	-	module unload
3739 *
3740 *	Called when the velocity driver module is unloaded. It cleans
3741 *	up the notifiers and unregisters the PCI and platform driver
3742 *	interfaces for this hardware, which in turn cleans up all
3743 *	discovered interfaces before returning from the function.
3744 */
3745static void __exit velocity_cleanup_module(void)
3746{
3747	velocity_unregister_notifier();
3748
3749	pci_unregister_driver(&velocity_pci_driver);
3750	platform_driver_unregister(&velocity_platform_driver);
3751}
3752
3753module_init(velocity_init_module);
3754module_exit(velocity_cleanup_module);