Linux Audio

Check our new training course

Loading...
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * New driver for Marvell Yukon 2 chipset.
   4 * Based on earlier sk98lin, and skge driver.
   5 *
   6 * This driver intentionally does not support all the features
   7 * of the original driver such as link fail-over and link management because
   8 * those should be done at higher levels.
   9 *
  10 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/crc32.h>
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/netdevice.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/etherdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/pci.h>
  23#include <linux/interrupt.h>
  24#include <linux/ip.h>
  25#include <linux/slab.h>
  26#include <net/ip.h>
  27#include <linux/tcp.h>
  28#include <linux/in.h>
  29#include <linux/delay.h>
  30#include <linux/workqueue.h>
  31#include <linux/if_vlan.h>
  32#include <linux/prefetch.h>
  33#include <linux/debugfs.h>
  34#include <linux/mii.h>
 
  35#include <linux/of_net.h>
  36#include <linux/dmi.h>
  37#include <linux/skbuff_ref.h>
  38
  39#include <asm/irq.h>
  40
  41#include "sky2.h"
  42
  43#define DRV_NAME		"sky2"
  44#define DRV_VERSION		"1.30"
  45
  46/*
  47 * The Yukon II chipset takes 64 bit command blocks (called list elements)
  48 * that are organized into three (receive, transmit, status) different rings
  49 * similar to Tigon3.
  50 */
  51
  52#define RX_LE_SIZE	    	1024
  53#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
  54#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
  55#define RX_DEF_PENDING		RX_MAX_PENDING
  56
  57/* This is the worst case number of transmit list elements for a single skb:
  58 * VLAN:GSO + CKSUM + Data + skb_frags * DMA
  59 */
  60#define MAX_SKB_TX_LE	(2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
  61#define TX_MIN_PENDING		(MAX_SKB_TX_LE+1)
  62#define TX_MAX_PENDING		1024
  63#define TX_DEF_PENDING		63
  64
  65#define TX_WATCHDOG		(5 * HZ)
  66#define PHY_RETRIES		1000
  67
  68#define SKY2_EEPROM_MAGIC	0x9955aabb
  69
  70#define RING_NEXT(x, s)	(((x)+1) & ((s)-1))
  71
  72static const u32 default_msg =
  73    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
  74    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
  75    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
  76
  77static int debug = -1;		/* defaults above */
  78module_param(debug, int, 0);
  79MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  80
  81static int copybreak __read_mostly = 128;
  82module_param(copybreak, int, 0);
  83MODULE_PARM_DESC(copybreak, "Receive copy threshold");
  84
  85static int disable_msi = -1;
  86module_param(disable_msi, int, 0);
  87MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  88
  89static int legacy_pme = 0;
  90module_param(legacy_pme, int, 0);
  91MODULE_PARM_DESC(legacy_pme, "Legacy power management");
  92
/* PCI IDs claimed by this driver: SysKonnect, D-Link and Marvell
 * boards built around the Yukon 2 family of chips.
 */
static const struct pci_device_id sky2_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },	/* DGE-550T */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
	{ 0 }
};
 139
 140MODULE_DEVICE_TABLE(pci, sky2_id_table);
 141
/* Avoid conditionals by using array — all three are indexed by port (0 or 1) */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };	/* Tx queue base address */
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };	/* Rx queue base address */
static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };	/* IRQ status bit */
 146
 147static void sky2_set_multicast(struct net_device *dev);
 148static irqreturn_t sky2_intr(int irq, void *dev_id);
 149
 150/* Access to PHY via serial interconnect */
/* Write @val to PHY register @reg of @port through the GMA serial
 * management (SMI) interface, then poll for completion.
 *
 * Returns 0 on success, -ETIMEDOUT if the busy bit never clears within
 * PHY_RETRIES polls, or -EIO if the control register reads back as
 * all-ones (hardware is absent/unresponsive).
 */
static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
{
	int i;

	/* load the data first, then kick off the write transaction */
	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));

	for (i = 0; i < PHY_RETRIES; i++) {
		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
		if (ctrl == 0xffff)	/* all-ones: device not responding */
			goto io_error;

		if (!(ctrl & GM_SMI_CT_BUSY))	/* transaction finished */
			return 0;

		udelay(10);
	}

	dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
	return -ETIMEDOUT;

io_error:
	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
	return -EIO;
}
 177
/* Read PHY register @reg of @port through the SMI interface into *@val.
 *
 * Returns 0 on success, -ETIMEDOUT if the read-valid bit never appears
 * within PHY_RETRIES polls, or -EIO if the control register reads back
 * as all-ones (hardware is absent/unresponsive).  *@val is untouched
 * on failure.
 */
static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
{
	int i;

	/* start a read transaction for the given PHY register */
	gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
		if (ctrl == 0xffff)	/* all-ones: device not responding */
			goto io_error;

		if (ctrl & GM_SMI_CT_RD_VAL) {	/* data is ready */
			*val = gma_read16(hw, port, GM_SMI_DATA);
			return 0;
		}

		udelay(10);
	}

	dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
	return -ETIMEDOUT;
io_error:
	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
	return -EIO;
}
 204
 205static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 206{
 207	u16 v = 0;
 208	__gm_phy_read(hw, port, reg, &v);
 209	return v;
 210}
 211
 212
/* Bring the chip to full power: switch from VAUX to VCC, restore core
 * clocking, apply advanced power-control fixups where supported, and
 * light the "driver loaded" status LED.
 */
static void sky2_power_on(struct sky2_hw *hw)
{
	/* switch power to VCC (WA for VAUX problem) */
	sky2_write8(hw, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

	/* disable Core Clock Division, */
	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
	else
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);

	if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
		u32 reg;

		sky2_pci_write32(hw, PCI_DEV_REG3, 0);

		reg = sky2_pci_read32(hw, PCI_DEV_REG4);
		/* set all bits to 0 except bits 15..12 and 8 */
		reg &= P_ASPM_CONTROL_MSK;
		sky2_pci_write32(hw, PCI_DEV_REG4, reg);

		reg = sky2_pci_read32(hw, PCI_DEV_REG5);
		/* set all bits to 0 except bits 28 & 27 */
		reg &= P_CTL_TIM_VMAIN_AV_MSK;
		sky2_pci_write32(hw, PCI_DEV_REG5, reg);

		sky2_pci_write32(hw, PCI_CFG_REG_1, 0);

		sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);

		/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
		reg = sky2_read32(hw, B2_GP_IO);
		reg |= GLB_GPIO_STAT_RACE_DIS;
		sky2_write32(hw, B2_GP_IO, reg);

		/* dummy read; result discarded — presumably flushes the
		 * preceding posted write (TODO confirm against HW docs) */
		sky2_read32(hw, B2_GP_IO);
	}

	/* Turn on "driver loaded" LED */
	sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON);
}
 261
/* Drop the chip to auxiliary power (used on suspend/shutdown paths):
 * re-enable clock gating, switch to VAUX when the board supports it
 * and can generate PME from D3cold, and turn off the status LED.
 */
static void sky2_power_aux(struct sky2_hw *hw)
{
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
	else
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);

	/* switch power to VAUX if supported and PME from D3cold */
	if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
	     pci_pme_capable(hw->pdev, PCI_D3cold))
		sky2_write8(hw, B0_POWER_CTRL,
			    (PC_VAUX_ENA | PC_VCC_ENA |
			     PC_VAUX_ON | PC_VCC_OFF));

	/* turn off "driver loaded LED" */
	sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF);
}
 283
/* Quiesce the GMAC for @port: mask its interrupts, wipe the multicast
 * hash filter and re-enable unicast/multicast reception.
 */
static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
{
	u16 reg;

	/* disable all GMAC IRQ's */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	/* accept unicast and multicast frames again */
	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
	gma_write16(hw, port, GM_RX_CTRL, reg);
}
 300
/* flow control to advertise bits — indexed by enum flow_control (copper) */
static const u16 copper_fc_adv[] = {
	[FC_NONE]	= 0,
	[FC_TX]		= PHY_M_AN_ASP,
	[FC_RX]		= PHY_M_AN_PC,
	[FC_BOTH]	= PHY_M_AN_PC | PHY_M_AN_ASP,
};
 308
/* flow control to advertise bits when using 1000BaseX — indexed by enum flow_control */
static const u16 fiber_fc_adv[] = {
	[FC_NONE] = PHY_M_P_NO_PAUSE_X,
	[FC_TX]   = PHY_M_P_ASYM_MD_X,
	[FC_RX]	  = PHY_M_P_SYM_MD_X,
	[FC_BOTH] = PHY_M_P_BOTH_MD_X,
};
 316
/* flow control to GMA disable bits — indexed by enum flow_control */
static const u16 gm_fc_disable[] = {
	[FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
	[FC_TX]	  = GM_GPCR_FC_RX_DIS,
	[FC_RX]	  = GM_GPCR_FC_TX_DIS,
	[FC_BOTH] = 0,
};
 324
 325
/* Configure the Marvell PHY on @port for the current speed, duplex,
 * autonegotiation and flow-control settings, then set up LEDs and
 * apply per-chip errata workarounds.
 *
 * The exact register-write ordering below follows chip errata and the
 * vendor driver; do not reorder.  All callers in this file invoke it
 * with sky2->phy_lock held.
 */
static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;

	/* downshift setup for older PHYs (extended control register) */
	if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
	    !(hw->flags & SKY2_HW_NEWER_PHY)) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		/* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
		if (hw->chip_id == CHIP_ID_YUKON_EC)
			/* set downshift counter to 3x and enable downshift */
			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
		else
			/* set master & slave downshift counter to 1x */
			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	if (sky2_is_copper(hw)) {
		if (!(hw->flags & SKY2_HW_GIGABIT)) {
			/* enable automatic crossover
			 * (bit field sits one position lower on 100Mbit PHYs,
			 * hence the shift) */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;

			if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
			    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
				u16 spec;

				/* Enable Class A driver for FE+ A0 */
				spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
				spec |= PHY_M_FESC_SEL_CL_A;
				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
			}
		} else {
			/* disable energy detect */
			ctrl &= ~PHY_M_PC_EN_DET_MSK;

			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);

			/* downshift on PHY 88E1112 and 88E1149 is changed */
			if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
			     (hw->flags & SKY2_HW_NEWER_PHY)) {
				/* set downshift counter to 3x and enable downshift */
				ctrl &= ~PHY_M_PC_DSC_MSK;
				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
			}
		}
	} else {
		/* workaround for deviation #4.88 (CRC errors) */
		/* disable Automatic Crossover */

		ctrl &= ~PHY_M_PC_MDIX_MSK;
	}

	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	/* special setup for PHY 88E1112 Fiber */
	if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
		/* save page register so it can be restored afterwards */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		ctrl &= ~PHY_M_MAC_MD_MSK;
		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		if (hw->pmd_type  == 'P') {
			/* select page 1 to access Fiber registers */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);

			/* for SFP-module set SIGDET polarity to low */
			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			ctrl |= PHY_M_FIB_SIGD_POL;
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
		}

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	}

	/* build up PHY control, 1000T control, advertisement and
	 * GMAC general-purpose control values */
	ctrl = PHY_CT_RESET;
	ct1000 = 0;
	adv = PHY_AN_CSMA;
	reg = 0;

	if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
		if (sky2_is_copper(hw)) {
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (sky2->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (sky2->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (sky2->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (sky2->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

		} else {	/* special defines for FIBER (88E1040S only) */
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				adv |= PHY_M_AN_1000X_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				adv |= PHY_M_AN_1000X_AHD;
		}

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		/* Disable auto update for duplex flow control and duplex */
		reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;

		switch (sky2->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			reg |= GM_GPCR_SPEED_100;
			break;
		}

		if (sky2->duplex == DUPLEX_FULL) {
			reg |= GM_GPCR_DUP_FULL;
			ctrl |= PHY_CT_DUP_MD;
		} else if (sky2->speed < SPEED_1000)
			/* half duplex below gigabit: no flow control */
			sky2->flow_mode = FC_NONE;
	}

	if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
		if (sky2_is_copper(hw))
			adv |= copper_fc_adv[sky2->flow_mode];
		else
			adv |= fiber_fc_adv[sky2->flow_mode];
	} else {
		reg |= GM_GPCR_AU_FCT_DIS;
		reg |= gm_fc_disable[sky2->flow_mode];

		/* Forward pause packets to GMAC? */
		if (sky2->flow_mode & FC_RX)
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		else
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);

	if (hw->flags & SKY2_HW_GIGABIT)
		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_FE:
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);

		/* delete ACT LED control bits */
		ctrl &= ~PHY_M_FELP_LED1_MSK;
		/* change ACT LED control to blink mode */
		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_FE_P:
		/* Enable Link Partner Next Page */
		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		ctrl |= PHY_M_PC_ENA_LIP_NP;

		/* disable Energy Detect and enable scrambler */
		ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		/* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
		ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
			PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
			PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);

		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_XL:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */

		/* set Polarity Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
			     (PHY_M_POLC_LS1_P_MIX(4) |
			      PHY_M_POLC_IS0_P_MIX(4) |
			      PHY_M_POLC_LOS_CTRL(2) |
			      PHY_M_POLC_INIT_CTRL(2) |
			      PHY_M_POLC_STA1_CTRL(2) |
			      PHY_M_POLC_STA0_CTRL(2)));

		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_SUPR:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(8) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */

		/* set Blink Rate in LED Timer Control Register */
		gm_phy_write(hw, port, PHY_MARV_INT_MASK,
			     ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;

		/* turn off the Rx LED (LED_RX) */
		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
	}

	/* per-chip AFE/errata workarounds, keyed by chip id and revision */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
		/* apply fixes in PHY AFE */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);

		/* increase differential signal amplitude in 10BASE-T */
		gm_phy_write(hw, port, 0x18, 0xaa99);
		gm_phy_write(hw, port, 0x17, 0x2011);

		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
			/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
			gm_phy_write(hw, port, 0x18, 0xa204);
			gm_phy_write(hw, port, 0x17, 0x2002);
		}

		/* set page register to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
	} else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
		   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
		/* apply workaround for integrated resistors calibration */
		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
	} else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
		/* apply fixes in PHY AFE */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);

		/* apply RDAC termination workaround */
		gm_phy_write(hw, port, 24, 0x2800);
		gm_phy_write(hw, port, 23, 0x2001);

		/* set page register back to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
	} else if (hw->chip_id != CHIP_ID_YUKON_EX &&
		   hw->chip_id < CHIP_ID_YUKON_SUPR) {
		/* no effect on Yukon-XL */
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

		if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
		    sky2->speed == SPEED_100) {
			/* turn on 100 Mbps LED (LED_LINK100) */
			ledover |= PHY_M_LED_MO_100(MO_LED_ON);
		}

		if (ledover)
			gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	} else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
		   (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
		int i;
		/* This a phy register setup workaround copied from vendor driver. */
		static const struct {
			u16 reg, val;
		} eee_afe[] = {
			{ 0x156, 0x58ce },
			{ 0x153, 0x99eb },
			{ 0x141, 0x8064 },
			/* { 0x155, 0x130b },*/
			{ 0x000, 0x0000 },
			{ 0x151, 0x8433 },
			{ 0x14b, 0x8c44 },
			{ 0x14c, 0x0f90 },
			{ 0x14f, 0x39aa },
			/* { 0x154, 0x2f39 },*/
			{ 0x14d, 0xba33 },
			{ 0x144, 0x0048 },
			{ 0x152, 0x2010 },
			/* { 0x158, 0x1223 },*/
			{ 0x140, 0x4444 },
			{ 0x154, 0x2f3b },
			{ 0x158, 0xb203 },
			{ 0x157, 0x2029 },
		};

		/* Start Workaround for OptimaEEE Rev.Z0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);

		gm_phy_write(hw, port,  1, 0x4099);
		gm_phy_write(hw, port,  3, 0x1120);
		gm_phy_write(hw, port, 11, 0x113c);
		gm_phy_write(hw, port, 14, 0x8100);
		gm_phy_write(hw, port, 15, 0x112a);
		gm_phy_write(hw, port, 17, 0x1008);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
		gm_phy_write(hw, port,  1, 0x20b0);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);

		for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
			/* apply AFE settings */
			gm_phy_write(hw, port, 17, eee_afe[i].val);
			gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
		}

		/* End Workaround for OptimaEEE */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);

		/* Enable 10Base-Te (EEE) */
		if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
			reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
			gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
				     reg | PHY_M_10B_TE_ENABLE);
		}
	}

	/* Enable phy interrupt on auto-negotiation complete (or link up) */
	if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}
 692
/* Per-port PCI_DEV_REG1 bits: PHY power-down and COMA mode, indexed by port */
static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
 695
/* Power up the PHY for @port by clearing its power-down bit in
 * PCI_DEV_REG1 (config writes must be bracketed by TST_CFG_WRITE_ON/OFF),
 * then bring the PHY itself out of its low-power state.
 */
static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
{
	u32 reg1;

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	reg1 &= ~phy_power[port];

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
		reg1 |= coma_mode[port];

	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	/* dummy read; result discarded — presumably flushes the posted
	 * config write (TODO confirm against HW docs) */
	sky2_pci_read32(hw, PCI_DEV_REG1);

	if (hw->chip_id == CHIP_ID_YUKON_FE)
		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
	else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
		sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
}
 716
/* Put the PHY for @port into its low-power state: allow GMII power
 * down on newer PHYs, force the GMAC to a quiescent configuration,
 * request IEEE power-down in the PHY, then set the PHY power-down bit
 * in PCI_DEV_REG1.
 */
static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
{
	u32 reg1;
	u16 ctrl;

	/* release GPHY Control reset */
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);

	/* release GMAC reset */
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->flags & SKY2_HW_NEWER_PHY) {
		/* select page 2 to access MAC control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);

		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		/* allow GMII Power Down */
		ctrl &= ~PHY_M_MAC_GMIF_PUP;
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		/* set page register back to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
	}

	/* setup General Purpose Control Register */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
		    GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
		    GM_GPCR_AU_SPD_DIS);

	if (hw->chip_id != CHIP_ID_YUKON_EC) {
		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
			/* select page 2 to access MAC control register */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);

			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			/* enable Power Down */
			ctrl |= PHY_M_PC_POW_D_ENA;
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

			/* set page register back to 0 */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
		}

		/* set IEEE compatible Power Down Mode (dev. #4.99) */
		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
	}

	/* config writes must be bracketed by TST_CFG_WRITE_ON/OFF */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	reg1 |= phy_power[port];		/* set PHY to PowerDown/COMA Mode */
	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
 771
 772/* configure IPG according to used link speed */
 773static void sky2_set_ipg(struct sky2_port *sky2)
 774{
 775	u16 reg;
 776
 777	reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE);
 778	reg &= ~GM_SMOD_IPG_MSK;
 779	if (sky2->speed > SPEED_100)
 780		reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
 781	else
 782		reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
 783	gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg);
 784}
 785
 786/* Enable Rx/Tx */
 787static void sky2_enable_rx_tx(struct sky2_port *sky2)
 788{
 789	struct sky2_hw *hw = sky2->hw;
 790	unsigned port = sky2->port;
 791	u16 reg;
 792
 793	reg = gma_read16(hw, port, GM_GP_CTRL);
 794	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
 795	gma_write16(hw, port, GM_GP_CTRL, reg);
 796}
 797
 798/* Force a renegotiation */
/* Force a renegotiation: re-run PHY init and re-enable Rx/Tx,
 * serialized against other PHY users via phy_lock.
 */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_init(sky2->hw, sky2->port);
	sky2_enable_rx_tx(sky2);
	spin_unlock_bh(&sky2->phy_lock);
}
 806
 807/* Put device in state to listen for Wake On Lan */
/* Put device in state to listen for Wake On Lan: un-reset the MAC/PHY,
 * temporarily force 10/100 autonegotiation for the WOL link, program
 * the wake-up MAC address and the link-change/magic-packet trigger
 * bits selected by sky2->wol, then block the receiver.
 */
static void sky2_wol_init(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	enum flow_control save_mode;
	u16 ctrl;

	/* Bring hardware out of reset */
	sky2_write16(hw, B0_CTST, CS_RST_CLR);
	sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	/* Force to 10/100
	 * sky2_reset will re-enable on resume
	 */
	save_mode = sky2->flow_mode;
	ctrl = sky2->advertising;	/* ctrl reused as advertising backup */

	sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
	sky2->flow_mode = FC_NONE;

	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_power_up(hw, port);
	sky2_phy_init(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	/* restore the settings that were overridden for WOL */
	sky2->flow_mode = save_mode;
	sky2->advertising = ctrl;

	/* Set GMAC to no flow control and auto update for speed/duplex */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);

	/* Set WOL address */
	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
		    sky2->netdev->dev_addr, ETH_ALEN);

	/* Turn on appropriate WOL control bits */
	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
	ctrl = 0;
	if (sky2->wol & WAKE_PHY)
		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;

	if (sky2->wol & WAKE_MAGIC)
		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;

	/* pattern-match wakeup is never used by this driver */
	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);

	/* Disable PiG firmware */
	sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);

	/* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */
	if (legacy_pme) {
		u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
		reg1 |= PCI_Y2_PME_LEGACY;
		sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	}

	/* block receiver */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
	/* dummy read; result discarded — presumably flushes the posted
	 * writes before suspend continues (TODO confirm) */
	sky2_read32(hw, B0_CTST);
}
 878
 879static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
 880{
 881	struct net_device *dev = hw->dev[port];
 882
 883	if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
 884	      hw->chip_rev != CHIP_REV_YU_EX_A0) ||
 885	     hw->chip_id >= CHIP_ID_YUKON_FE_P) {
 886		/* Yukon-Extreme B0 and further Extreme devices */
 887		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
 888	} else if (dev->mtu > ETH_DATA_LEN) {
 889		/* set Tx GMAC FIFO Almost Empty Threshold */
 890		sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
 891			     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
 892
 893		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
 894	} else
 895		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
 896}
 897
/* Bring up the GMAC for one port: reset and power up the PHY, clear the
 * MIB counters, program MAC parameters/addresses, and configure the
 * Rx/Tx MAC FIFOs, including several chip-revision-specific workarounds.
 * Called from sky2_hw_up() with the device quiesced.
 */
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 reg;
	u32 rx_reg;
	int i;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* Pulse GPHY reset, then release the GMAC from reset */
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);

	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->chip_id == CHIP_ID_YUKON_XL &&
	    hw->chip_rev == CHIP_REV_YU_XL_A0 &&
	    port == 1) {
		/* WA DEV_472 -- looks like crossed wires on port 2 */
		/* clear GMAC 1 Control reset */
		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
		do {
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
	}

	/* NOTE(review): result discarded — presumably a clear-on-read of
	 * stale GMAC IRQ status; verify against chip documentation.
	 */
	sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	/* Enable Transmit FIFO Underrun */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_power_up(hw, port);
	sky2_phy_init(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	/* Read all MIB counters while clear mode is set, then restore */
	for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
		gma_read16(hw, port, i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS  */
	gma_write16(hw, port, GM_RX_CTRL,
		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	/* serial mode register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);

	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		reg |= GM_SMOD_JUMBO_ENA;

	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    hw->chip_rev == CHIP_REV_YU_EC_U_B1)
		reg |= GM_NEW_FLOW_CTRL;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);

	/* ignore counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_FE_P)
		rx_reg |= GMF_RX_OVER_ON;

	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);

	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		/* Hardware errata - clear flush mask */
		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error */
		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
	}

	/* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug  */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic mystery workaround from sk98lin */
	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
	    hw->chip_rev == CHIP_REV_YU_FE2_A0)
		reg = 0x178;
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO */
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* On chips without ram buffer, pause is controlled by MAC level */
	if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
		/* Pause threshold is scaled by 8 in bytes */
		if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
		    hw->chip_rev == CHIP_REV_YU_FE2_A0)
			reg = 1568 / 8;
		else
			reg = 1024 / 8;
		sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
		sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);

		sky2_set_tx_stfwd(hw, port);
	}

	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
	    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
		/* disable dynamic watermark */
		reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
		reg &= ~TX_DYN_WM_ENA;
		sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
	}
}
1035
/* Assign Ram Buffer allocation to queue.
 * @q:     queue register base (Q_R1/Q_R2 for receive queues)
 * @start: offset of the region within RAM, in Kbytes
 * @space: size of the region, in Kbytes
 *
 * Programs the RAM buffer window and, for receive queues, the
 * priority/pause thresholds (all values in qwords).
 */
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
{
	u32 end;

	/* convert from K bytes to qwords used for hw register */
	start *= 1024/8;
	space *= 1024/8;
	end = start + space - 1;

	/* Release buffer from reset, then program window and pointers */
	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	sky2_write32(hw, RB_ADDR(q, RB_START), start);
	sky2_write32(hw, RB_ADDR(q, RB_END), end);
	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
	sky2_write32(hw, RB_ADDR(q, RB_RP), start);

	if (q == Q_R1 || q == Q_R2) {
		u32 tp = space - space/4;

		/* On receive queue's set the thresholds
		 * give receiver priority when > 3/4 full
		 * send pause when down to 2K
		 */
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);

		tp = space - 8192/8;
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 1K on Yukon
		 */
		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
	/* read back to post the writes before returning */
	sky2_read8(hw, RB_ADDR(q, RB_CTRL));
}
1075
/* Setup Bus Memory Interface: take the BMU for queue @q out of reset,
 * initialize its operation, turn the FIFO on and set the default
 * watermark.
 */
static void sky2_qset(struct sky2_hw *hw, u16 q)
{
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
	sky2_write32(hw, Q_ADDR(q, Q_WM),  BMU_WM_DEFAULT);
}
1084
/* Setup prefetch unit registers. This is the interface between
 * hardware and driver list elements.
 * @qaddr: queue the prefetch unit belongs to
 * @addr:  bus address of the list element ring
 * @last:  index of the last ring entry
 */
static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
			       dma_addr_t addr, u32 last)
{
	/* Pulse reset, program ring address/size, then enable */
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);

	/* read back to post the writes */
	sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
}
1100
1101static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1102{
1103	struct sky2_tx_le *le = sky2->tx_le + *slot;
1104
1105	*slot = RING_NEXT(*slot, sky2->tx_ring_size);
1106	le->ctrl = 0;
1107	return le;
1108}
1109
/* Reset transmit ring bookkeeping and seed the ring with an initial
 * OP_ADDR64 element carrying address 0, so the hardware's upper-address
 * state matches tx_last_upper = 0.
 */
static void tx_init(struct sky2_port *sky2)
{
	struct sky2_tx_le *le;

	sky2->tx_prod = sky2->tx_cons = 0;
	sky2->tx_tcpsum = 0;
	sky2->tx_last_mss = 0;
	netdev_reset_queue(sky2->netdev);	/* reset BQL accounting */

	le = get_tx_le(sky2, &sky2->tx_prod);
	le->addr = 0;
	le->opcode = OP_ADDR64 | HW_OWNER;
	sky2->tx_last_upper = 0;
}
1124
/* Update chip's next pointer (prefetch unit put index) for queue @q. */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
	/* Make sure writes to descriptors are complete before we tell hardware */
	wmb();
	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
}
1132
1133
1134static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
1135{
1136	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
1137	sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
1138	le->ctrl = 0;
1139	return le;
1140}
1141
1142static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
1143{
1144	unsigned size;
1145
1146	/* Space needed for frame data + headers rounded up */
1147	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1148
1149	/* Stopping point for hardware truncation */
1150	return (size - 8) / sizeof(u32);
1151}
1152
1153static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
1154{
1155	struct rx_ring_info *re;
1156	unsigned size;
1157
1158	/* Space needed for frame data + headers rounded up */
1159	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1160
1161	sky2->rx_nfrags = size >> PAGE_SHIFT;
1162	BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1163
1164	/* Compute residue after pages */
1165	size -= sky2->rx_nfrags << PAGE_SHIFT;
1166
1167	/* Optimize to handle small packets and headers */
1168	if (size < copybreak)
1169		size = copybreak;
1170	if (size < ETH_HLEN)
1171		size = ETH_HLEN;
1172
1173	return size;
1174}
1175
/* Build description to hardware for one receive segment.
 * @op:  OP_PACKET for the linear part, OP_BUFFER for page fragments
 * @map: bus address of the buffer
 * @len: buffer length in bytes
 *
 * On configurations with 64-bit dma_addr_t, an OP_ADDR64 element
 * carrying the high 32 bits is emitted before the data element.
 */
static void sky2_rx_add(struct sky2_port *sky2, u8 op,
			dma_addr_t map, unsigned len)
{
	struct sky2_rx_le *le;

	if (sizeof(dma_addr_t) > sizeof(u32)) {
		le = sky2_next_rx(sky2);
		le->addr = cpu_to_le32(upper_32_bits(map));
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	le = sky2_next_rx(sky2);
	le->addr = cpu_to_le32(lower_32_bits(map));
	le->length = cpu_to_le16(len);
	le->opcode = op | HW_OWNER;
}
1193
1194/* Build description to hardware for one possibly fragmented skb */
1195static void sky2_rx_submit(struct sky2_port *sky2,
1196			   const struct rx_ring_info *re)
1197{
1198	int i;
1199
1200	sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
1201
1202	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
1203		sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
1204}
1205
1206
/* DMA-map a receive skb: the linear area (of @size bytes) plus every
 * page fragment. On failure, everything already mapped is unwound.
 * Returns 0 on success, -EIO on mapping failure.
 */
static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
			    unsigned size)
{
	struct sk_buff *skb = re->skb;
	int i;

	re->data_addr = dma_map_single(&pdev->dev, skb->data, size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, re->data_addr))
		goto mapping_error;

	dma_unmap_len_set(re, data_size, size);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
						    skb_frag_size(frag),
						    DMA_FROM_DEVICE);

		if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
			goto map_page_error;
	}
	return 0;

map_page_error:
	/* Unwind the fragments mapped so far, then the linear area */
	while (--i >= 0) {
		dma_unmap_page(&pdev->dev, re->frag_addr[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_FROM_DEVICE);
	}

	dma_unmap_single(&pdev->dev, re->data_addr,
			 dma_unmap_len(re, data_size), DMA_FROM_DEVICE);

mapping_error:
	if (net_ratelimit())
		dev_warn(&pdev->dev, "%s: rx mapping error\n",
			 skb->dev->name);
	return -EIO;
}
1248
1249static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
1250{
1251	struct sk_buff *skb = re->skb;
1252	int i;
1253
1254	dma_unmap_single(&pdev->dev, re->data_addr,
1255			 dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
1256
1257	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1258		dma_unmap_page(&pdev->dev, re->frag_addr[i],
1259			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
1260			       DMA_FROM_DEVICE);
1261}
1262
/* Tell chip where to start receive checksum.
 * Actually has two checksums, but set both same to avoid possible byte
 * order problems.
 *
 * Emits an OP_TCPSTART list element with both start offsets at
 * ETH_HLEN, then enables or disables BMU Rx checksumming according
 * to the NETIF_F_RXCSUM feature bit.
 */
static void rx_set_checksum(struct sky2_port *sky2)
{
	struct sky2_rx_le *le = sky2_next_rx(sky2);

	/* two 16-bit start offsets packed into one 32-bit word */
	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
	le->ctrl = 0;
	le->opcode = OP_TCPSTART | HW_OWNER;

	sky2_write32(sky2->hw,
		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     (sky2->netdev->features & NETIF_F_RXCSUM)
		     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
1280
1281/* Enable/disable receive hash calculation (RSS) */
1282static void rx_set_rss(struct net_device *dev, netdev_features_t features)
1283{
1284	struct sky2_port *sky2 = netdev_priv(dev);
1285	struct sky2_hw *hw = sky2->hw;
1286	int i, nkeys = 4;
1287
1288	/* Supports IPv6 and other modes */
1289	if (hw->flags & SKY2_HW_NEW_LE) {
1290		nkeys = 10;
1291		sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
1292	}
1293
1294	/* Program RSS initial values */
1295	if (features & NETIF_F_RXHASH) {
1296		u32 rss_key[10];
1297
1298		netdev_rss_key_fill(rss_key, sizeof(rss_key));
1299		for (i = 0; i < nkeys; i++)
1300			sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
1301				     rss_key[i]);
1302
1303		/* Need to turn on (undocumented) flag to make hashing work  */
1304		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
1305			     RX_STFW_ENA);
1306
1307		sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1308			     BMU_ENA_RX_RSS_HASH);
1309	} else
1310		sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1311			     BMU_DIS_RX_RSS_HASH);
1312}
1313
1314/*
1315 * The RX Stop command will not work for Yukon-2 if the BMU does not
1316 * reach the end of packet and since we can't make sure that we have
1317 * incoming data, we must reset the BMU while it is not doing a DMA
1318 * transfer. Since it is possible that the RX path is still active,
1319 * the RX RAM buffer will be stopped first, so any possible incoming
1320 * data will not trigger a DMA. After the RAM buffer is stopped, the
1321 * BMU is polled until any DMA in progress is ended and only then it
1322 * will be reset.
1323 */
1324static void sky2_rx_stop(struct sky2_port *sky2)
1325{
1326	struct sky2_hw *hw = sky2->hw;
1327	unsigned rxq = rxqaddr[sky2->port];
1328	int i;
1329
1330	/* disable the RAM Buffer receive queue */
1331	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
1332
1333	for (i = 0; i < 0xffff; i++)
1334		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
1335		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
1336			goto stopped;
1337
1338	netdev_warn(sky2->netdev, "receiver stop failed\n");
1339stopped:
1340	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
1341
1342	/* reset the Rx prefetch unit */
1343	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1344}
1345
1346/* Clean out receive buffer area, assumes receiver hardware stopped */
1347static void sky2_rx_clean(struct sky2_port *sky2)
1348{
1349	unsigned i;
1350
1351	if (sky2->rx_le)
1352		memset(sky2->rx_le, 0, RX_LE_BYTES);
1353
1354	for (i = 0; i < sky2->rx_pending; i++) {
1355		struct rx_ring_info *re = sky2->rx_ring + i;
1356
1357		if (re->skb) {
1358			sky2_rx_unmap_skb(sky2->hw->pdev, re);
1359			kfree_skb(re->skb);
1360			re->skb = NULL;
1361		}
1362	}
1363}
1364
/* Basic MII support: handle SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG by
 * reading/writing the Marvell PHY registers under phy_lock.
 * Returns -ENODEV while the interface is down (PHY still in reset),
 * -EOPNOTSUPP for unknown commands.
 */
static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR_MARV;

		fallthrough;
	case SIOCGMIIREG: {
		u16 val = 0;

		spin_lock_bh(&sky2->phy_lock);
		err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&sky2->phy_lock);

		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		spin_lock_bh(&sky2->phy_lock);
		err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
				   data->val_in);
		spin_unlock_bh(&sky2->phy_lock);
		break;
	}
	return err;
}
1401
1402#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
1403
1404static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
1405{
1406	struct sky2_port *sky2 = netdev_priv(dev);
1407	struct sky2_hw *hw = sky2->hw;
1408	u16 port = sky2->port;
1409
1410	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1411		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1412			     RX_VLAN_STRIP_ON);
1413	else
1414		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1415			     RX_VLAN_STRIP_OFF);
1416
1417	if (features & NETIF_F_HW_VLAN_CTAG_TX) {
1418		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1419			     TX_VLAN_TAG_ON);
1420
1421		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
1422	} else {
1423		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1424			     TX_VLAN_TAG_OFF);
1425
1426		/* Can't do transmit offload of vlan without hw vlan */
1427		dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
1428	}
1429}
1430
1431/* Amount of required worst case padding in rx buffer */
1432static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1433{
1434	return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
1435}
1436
1437/*
1438 * Allocate an skb for receiving. If the MTU is large enough
1439 * make the skb non-linear with a fragment list of pages.
1440 */
1441static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
1442{
1443	struct sk_buff *skb;
1444	int i;
1445
1446	skb = __netdev_alloc_skb(sky2->netdev,
1447				 sky2->rx_data_size + sky2_rx_pad(sky2->hw),
1448				 gfp);
1449	if (!skb)
1450		goto nomem;
1451
1452	if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
1453		unsigned char *start;
1454		/*
1455		 * Workaround for a bug in FIFO that cause hang
1456		 * if the FIFO if the receive buffer is not 64 byte aligned.
1457		 * The buffer returned from netdev_alloc_skb is
1458		 * aligned except if slab debugging is enabled.
1459		 */
1460		start = PTR_ALIGN(skb->data, 8);
1461		skb_reserve(skb, start - skb->data);
1462	} else
1463		skb_reserve(skb, NET_IP_ALIGN);
1464
1465	for (i = 0; i < sky2->rx_nfrags; i++) {
1466		struct page *page = alloc_page(gfp);
1467
1468		if (!page)
1469			goto free_partial;
1470		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
1471	}
1472
1473	return skb;
1474free_partial:
1475	kfree_skb(skb);
1476nomem:
1477	return NULL;
1478}
1479
/* Tell the chip how far the driver has filled the receive ring. */
static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
{
	sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}
1484
1485static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
1486{
1487	struct sky2_hw *hw = sky2->hw;
1488	unsigned i;
1489
1490	sky2->rx_data_size = sky2_get_rx_data_size(sky2);
1491
1492	/* Fill Rx ring */
1493	for (i = 0; i < sky2->rx_pending; i++) {
1494		struct rx_ring_info *re = sky2->rx_ring + i;
1495
1496		re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
1497		if (!re->skb)
1498			return -ENOMEM;
1499
1500		if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
1501			dev_kfree_skb(re->skb);
1502			re->skb = NULL;
1503			return -ENOMEM;
1504		}
1505	}
1506	return 0;
1507}
1508
1509/*
1510 * Setup receiver buffer pool.
1511 * Normal case this ends up creating one list element for skb
1512 * in the receive ring. Worst case if using large MTU and each
1513 * allocation falls on a different 64 bit region, that results
1514 * in 6 list elements per ring entry.
1515 * One element is used for checksum enable/disable, and one
1516 * extra to avoid wrap.
1517 */
1518static void sky2_rx_start(struct sky2_port *sky2)
1519{
1520	struct sky2_hw *hw = sky2->hw;
1521	struct rx_ring_info *re;
1522	unsigned rxq = rxqaddr[sky2->port];
1523	unsigned i, thresh;
1524
1525	sky2->rx_put = sky2->rx_next = 0;
1526	sky2_qset(hw, rxq);
1527
1528	/* On PCI express lowering the watermark gives better performance */
1529	if (pci_is_pcie(hw->pdev))
1530		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
1531
1532	/* These chips have no ram buffer?
1533	 * MAC Rx RAM Read is controlled by hardware
1534	 */
1535	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1536	    hw->chip_rev > CHIP_REV_YU_EC_U_A0)
1537		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1538
1539	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1540
1541	if (!(hw->flags & SKY2_HW_NEW_LE))
1542		rx_set_checksum(sky2);
1543
1544	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
1545		rx_set_rss(sky2->netdev, sky2->netdev->features);
1546
1547	/* submit Rx ring */
1548	for (i = 0; i < sky2->rx_pending; i++) {
1549		re = sky2->rx_ring + i;
1550		sky2_rx_submit(sky2, re);
1551	}
1552
1553	/*
1554	 * The receiver hangs if it receives frames larger than the
1555	 * packet buffer. As a workaround, truncate oversize frames, but
1556	 * the register is limited to 9 bits, so if you do frames > 2052
1557	 * you better get the MTU right!
1558	 */
1559	thresh = sky2_get_rx_threshold(sky2);
1560	if (thresh > 0x1ff)
1561		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1562	else {
1563		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1564		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1565	}
1566
1567	/* Tell chip about available buffers */
1568	sky2_rx_update(sky2, rxq);
1569
1570	if (hw->chip_id == CHIP_ID_YUKON_EX ||
1571	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
1572		/*
1573		 * Disable flushing of non ASF packets;
1574		 * must be done after initializing the BMUs;
1575		 * drivers without ASF support should do this too, otherwise
1576		 * it may happen that they cannot run on ASF devices;
1577		 * remember that the MAC FIFO isn't reset during initialization.
1578		 */
1579		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
1580	}
1581
1582	if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
1583		/* Enable RX Home Address & Routing Header checksum fix */
1584		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
1585			     RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
1586
1587		/* Enable TX Home Address & Routing Header checksum fix */
1588		sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
1589			     TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
1590	}
1591}
1592
1593static int sky2_alloc_buffers(struct sky2_port *sky2)
1594{
1595	struct sky2_hw *hw = sky2->hw;
1596
1597	/* must be power of 2 */
1598	sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev,
1599					 sky2->tx_ring_size * sizeof(struct sky2_tx_le),
1600					 &sky2->tx_le_map, GFP_KERNEL);
1601	if (!sky2->tx_le)
1602		goto nomem;
1603
1604	sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
1605				GFP_KERNEL);
1606	if (!sky2->tx_ring)
1607		goto nomem;
1608
1609	sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES,
1610					 &sky2->rx_le_map, GFP_KERNEL);
1611	if (!sky2->rx_le)
1612		goto nomem;
1613
1614	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
1615				GFP_KERNEL);
1616	if (!sky2->rx_ring)
1617		goto nomem;
1618
1619	return sky2_alloc_rx_skbs(sky2);
1620nomem:
1621	return -ENOMEM;
1622}
1623
1624static void sky2_free_buffers(struct sky2_port *sky2)
1625{
1626	struct sky2_hw *hw = sky2->hw;
1627
1628	sky2_rx_clean(sky2);
1629
1630	if (sky2->rx_le) {
1631		dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le,
1632				  sky2->rx_le_map);
1633		sky2->rx_le = NULL;
1634	}
1635	if (sky2->tx_le) {
1636		dma_free_coherent(&hw->pdev->dev,
1637				  sky2->tx_ring_size * sizeof(struct sky2_tx_le),
1638				  sky2->tx_le, sky2->tx_le_map);
1639		sky2->tx_le = NULL;
1640	}
1641	kfree(sky2->tx_ring);
1642	kfree(sky2->rx_ring);
1643
1644	sky2->tx_ring = NULL;
1645	sky2->rx_ring = NULL;
1646}
1647
/* Bring the hardware for one port fully up: init the Tx ring state,
 * work around PCI-X split-transaction issues on dual-port cards,
 * init the MAC, carve up the internal RAM buffer (if present) between
 * Rx and Tx, set up the Tx BMU/prefetch unit and start the receiver.
 */
static void sky2_hw_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 ramsize;
	int cap;
	struct net_device *otherdev = hw->dev[sky2->port^1];

	tx_init(sky2);

	/*
	 * On dual port PCI-X card, there is an problem where status
	 * can be received out of order due to split transactions
	 */
	if (otherdev && netif_running(otherdev) &&
	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
		u16 cmd;

		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
		cmd &= ~PCI_X_CMD_MAX_SPLIT;
		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
	}

	sky2_mac_init(hw, port);

	/* Register is number of 4K blocks on internal RAM buffer. */
	ramsize = sky2_read8(hw, B2_E_0) * 4;
	if (ramsize > 0) {
		u32 rxspace;

		netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
		/* Small buffers: split evenly; larger: ~2/3 beyond 16K to Rx */
		if (ramsize < 16)
			rxspace = ramsize / 2;
		else
			rxspace = 8 + (2*(ramsize - 16))/3;

		sky2_ramset(hw, rxqaddr[port], 0, rxspace);
		sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);

		/* Make sure SyncQ is disabled */
		sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
			    RB_RST_SET);
	}

	sky2_qset(hw, txqaddr[port]);

	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);

	/* Set almost empty threshold */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    hw->chip_rev == CHIP_REV_YU_EC_U_A0)
		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);

	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   sky2->tx_ring_size - 1);

	sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
	netdev_update_features(sky2->netdev);

	sky2_rx_start(sky2);
}
1711
1712/* Setup device IRQ and enable napi to process */
1713static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1714{
1715	struct pci_dev *pdev = hw->pdev;
1716	int err;
1717
1718	err = request_irq(pdev->irq, sky2_intr,
1719			  (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
1720			  name, hw);
1721	if (err)
1722		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
1723	else {
1724		hw->flags |= SKY2_HW_IRQ_SETUP;
1725
1726		napi_enable(&hw->napi);
1727		sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
1728		sky2_read32(hw, B0_IMSK);
1729	}
1730
1731	return err;
1732}
1733
1734
1735/* Bring up network interface. */
1736static int sky2_open(struct net_device *dev)
1737{
1738	struct sky2_port *sky2 = netdev_priv(dev);
1739	struct sky2_hw *hw = sky2->hw;
1740	unsigned port = sky2->port;
1741	u32 imask;
1742	int err;
1743
1744	netif_carrier_off(dev);
1745
1746	err = sky2_alloc_buffers(sky2);
1747	if (err)
1748		goto err_out;
1749
1750	/* With single port, IRQ is setup when device is brought up */
1751	if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
1752		goto err_out;
1753
1754	sky2_hw_up(sky2);
1755
1756	/* Enable interrupts from phy/mac for port */
1757	imask = sky2_read32(hw, B0_IMSK);
1758
1759	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
1760	    hw->chip_id == CHIP_ID_YUKON_PRM ||
1761	    hw->chip_id == CHIP_ID_YUKON_OP_2)
1762		imask |= Y2_IS_PHY_QLNK;	/* enable PHY Quick Link */
1763
1764	imask |= portirq_msk[port];
1765	sky2_write32(hw, B0_IMSK, imask);
1766	sky2_read32(hw, B0_IMSK);
1767
1768	netif_info(sky2, ifup, dev, "enabling interface\n");
1769
1770	return 0;
1771
1772err_out:
1773	sky2_free_buffers(sky2);
1774	return err;
1775}
1776
1777/* Modular subtraction in ring */
1778static inline int tx_inuse(const struct sky2_port *sky2)
1779{
1780	return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1);
1781}
1782
1783/* Number of list elements available for next tx */
1784static inline int tx_avail(const struct sky2_port *sky2)
1785{
1786	return sky2->tx_pending - tx_inuse(sky2);
1787}
1788
1789/* Estimate of number of transmit list elements required */
1790static unsigned tx_le_req(const struct sk_buff *skb)
1791{
1792	unsigned count;
1793
1794	count = (skb_shinfo(skb)->nr_frags + 1)
1795		* (sizeof(dma_addr_t) / sizeof(u32));
1796
1797	if (skb_is_gso(skb))
1798		++count;
1799	else if (sizeof(dma_addr_t) == sizeof(u32))
1800		++count;	/* possible vlan */
1801
1802	if (skb->ip_summed == CHECKSUM_PARTIAL)
1803		++count;
1804
1805	return count;
1806}
1807
1808static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1809{
1810	if (re->flags & TX_MAP_SINGLE)
1811		dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr),
1812				 dma_unmap_len(re, maplen), DMA_TO_DEVICE);
1813	else if (re->flags & TX_MAP_PAGE)
1814		dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr),
1815			       dma_unmap_len(re, maplen), DMA_TO_DEVICE);
1816	re->flags = 0;
1817}
1818
1819/*
1820 * Put one packet in ring for transmit.
1821 * A single packet can generate multiple list elements, and
1822 * the number of ring elements will probably be less than the number
1823 * of list elements used.
1824 */
1825static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1826				   struct net_device *dev)
1827{
1828	struct sky2_port *sky2 = netdev_priv(dev);
1829	struct sky2_hw *hw = sky2->hw;
1830	struct sky2_tx_le *le = NULL;
1831	struct tx_ring_info *re;
1832	unsigned i, len;
1833	dma_addr_t mapping;
1834	u32 upper;
1835	u16 slot;
1836	u16 mss;
1837	u8 ctrl;
1838
1839	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1840		return NETDEV_TX_BUSY;
1841
1842	len = skb_headlen(skb);
1843	mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
1844				 DMA_TO_DEVICE);
1845
1846	if (dma_mapping_error(&hw->pdev->dev, mapping))
1847		goto mapping_error;
1848
1849	slot = sky2->tx_prod;
1850	netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
1851		     "tx queued, slot %u, len %d\n", slot, skb->len);
1852
1853	/* Send high bits if needed */
1854	upper = upper_32_bits(mapping);
1855	if (upper != sky2->tx_last_upper) {
1856		le = get_tx_le(sky2, &slot);
1857		le->addr = cpu_to_le32(upper);
1858		sky2->tx_last_upper = upper;
1859		le->opcode = OP_ADDR64 | HW_OWNER;
1860	}
1861
1862	/* Check for TCP Segmentation Offload */
1863	mss = skb_shinfo(skb)->gso_size;
1864	if (mss != 0) {
1865
1866		if (!(hw->flags & SKY2_HW_NEW_LE))
1867			mss += skb_tcp_all_headers(skb);
1868
1869		if (mss != sky2->tx_last_mss) {
1870			le = get_tx_le(sky2, &slot);
1871			le->addr = cpu_to_le32(mss);
1872
1873			if (hw->flags & SKY2_HW_NEW_LE)
1874				le->opcode = OP_MSS | HW_OWNER;
1875			else
1876				le->opcode = OP_LRGLEN | HW_OWNER;
1877			sky2->tx_last_mss = mss;
1878		}
1879	}
1880
1881	ctrl = 0;
1882
1883	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1884	if (skb_vlan_tag_present(skb)) {
1885		if (!le) {
1886			le = get_tx_le(sky2, &slot);
1887			le->addr = 0;
1888			le->opcode = OP_VLAN|HW_OWNER;
1889		} else
1890			le->opcode |= OP_VLAN;
1891		le->length = cpu_to_be16(skb_vlan_tag_get(skb));
1892		ctrl |= INS_VLAN;
1893	}
1894
1895	/* Handle TCP checksum offload */
1896	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1897		/* On Yukon EX (some versions) encoding change. */
1898		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
1899			ctrl |= CALSUM;	/* auto checksum */
1900		else {
1901			const unsigned offset = skb_transport_offset(skb);
1902			u32 tcpsum;
1903
1904			tcpsum = offset << 16;			/* sum start */
1905			tcpsum |= offset + skb->csum_offset;	/* sum write */
1906
1907			ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1908			if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1909				ctrl |= UDPTCP;
1910
1911			if (tcpsum != sky2->tx_tcpsum) {
1912				sky2->tx_tcpsum = tcpsum;
1913
1914				le = get_tx_le(sky2, &slot);
1915				le->addr = cpu_to_le32(tcpsum);
1916				le->length = 0;	/* initial checksum value */
1917				le->ctrl = 1;	/* one packet */
1918				le->opcode = OP_TCPLISW | HW_OWNER;
1919			}
1920		}
1921	}
1922
1923	re = sky2->tx_ring + slot;
1924	re->flags = TX_MAP_SINGLE;
1925	dma_unmap_addr_set(re, mapaddr, mapping);
1926	dma_unmap_len_set(re, maplen, len);
1927
1928	le = get_tx_le(sky2, &slot);
1929	le->addr = cpu_to_le32(lower_32_bits(mapping));
1930	le->length = cpu_to_le16(len);
1931	le->ctrl = ctrl;
1932	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1933
1934
1935	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1936		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1937
1938		mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
1939					   skb_frag_size(frag), DMA_TO_DEVICE);
1940
1941		if (dma_mapping_error(&hw->pdev->dev, mapping))
1942			goto mapping_unwind;
1943
1944		upper = upper_32_bits(mapping);
1945		if (upper != sky2->tx_last_upper) {
1946			le = get_tx_le(sky2, &slot);
1947			le->addr = cpu_to_le32(upper);
1948			sky2->tx_last_upper = upper;
1949			le->opcode = OP_ADDR64 | HW_OWNER;
1950		}
1951
1952		re = sky2->tx_ring + slot;
1953		re->flags = TX_MAP_PAGE;
1954		dma_unmap_addr_set(re, mapaddr, mapping);
1955		dma_unmap_len_set(re, maplen, skb_frag_size(frag));
1956
1957		le = get_tx_le(sky2, &slot);
1958		le->addr = cpu_to_le32(lower_32_bits(mapping));
1959		le->length = cpu_to_le16(skb_frag_size(frag));
1960		le->ctrl = ctrl;
1961		le->opcode = OP_BUFFER | HW_OWNER;
1962	}
1963
1964	re->skb = skb;
1965	le->ctrl |= EOP;
1966
1967	sky2->tx_prod = slot;
1968
1969	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
1970		netif_stop_queue(dev);
1971
1972	netdev_sent_queue(dev, skb->len);
1973	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1974
1975	return NETDEV_TX_OK;
1976
1977mapping_unwind:
1978	for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
1979		re = sky2->tx_ring + i;
1980
1981		sky2_tx_unmap(hw->pdev, re);
1982	}
1983
1984mapping_error:
1985	if (net_ratelimit())
1986		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
1987	dev_kfree_skb_any(skb);
1988	return NETDEV_TX_OK;
1989}
1990
1991/*
1992 * Free ring elements from starting at tx_cons until "done"
1993 *
1994 * NB:
1995 *  1. The hardware will tell us about partial completion of multi-part
1996 *     buffers so make sure not to free skb to early.
1997 *  2. This may run in parallel start_xmit because the it only
1998 *     looks at the tail of the queue of FIFO (tx_cons), not
1999 *     the head (tx_prod)
2000 */
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
	struct net_device *dev = sky2->netdev;
	u16 idx;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	BUG_ON(done >= sky2->tx_ring_size);

	for (idx = sky2->tx_cons; idx != done;
	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
		struct tx_ring_info *re = sky2->tx_ring + idx;
		struct sk_buff *skb = re->skb;

		sky2_tx_unmap(sky2->hw->pdev, re);

		/* re->skb is only set on the last element of a frame
		 * (see the transmit path), so a non-NULL skb marks a
		 * fully completed packet.
		 */
		if (skb) {
			netif_printk(sky2, tx_done, KERN_DEBUG, dev,
				     "tx done %u\n", idx);

			pkts_compl++;
			bytes_compl += skb->len;

			re->skb = NULL;
			dev_kfree_skb_any(skb);

			sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
		}
	}

	sky2->tx_cons = idx;
	/* Make the tx_cons update visible before the queue is woken;
	 * presumably paired with the ring-space check in the xmit path.
	 */
	smp_mb();

	/* Byte queue limits accounting for this completion batch */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	u64_stats_update_begin(&sky2->tx_stats.syncp);
	sky2->tx_stats.packets += pkts_compl;
	sky2->tx_stats.bytes += bytes_compl;
	u64_stats_update_end(&sky2->tx_stats.syncp);
}
2040
/* Put the transmit path of one port into reset: Tx arbiter, BMU FIFO,
 * prefetch unit, RAM buffer and MAC transmit FIFO.
 */
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
{
	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	/* Reset RAM buffer and MAC Tx FIFO for this queue */
	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);

	/* Dummy read to flush the posted writes above */
	sky2_read32(hw, B0_CTST);
}
2064
/* Bring the hardware for one port down: stop transmitter and receiver,
 * reset MAC and PHY, and reclaim any frames still pending in the
 * transmit ring.  Called with the device quiesced (no NAPI/IRQ activity
 * for this port).
 */
static void sky2_hw_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;

	/* Force flow control off */
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	/* Disable both Rx and Tx in the GMAC */
	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
	      port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Force any delayed status interrupt and NAPI */
	sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
	sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
	sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
	sky2_read8(hw, STAT_ISR_TIMER_CTRL);

	sky2_rx_stop(sky2);

	/* PHY register access is serialized by phy_lock */
	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_power_down(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	sky2_tx_reset(hw, port);

	/* Free any pending frames stuck in HW queue */
	sky2_tx_complete(sky2, sky2->tx_prod);
}
2111
/* Network shutdown (ndo_stop): mask interrupts, quiesce NAPI, tear
 * down the port hardware and release ring buffers.
 */
static int sky2_close(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	/* Never really got started! */
	if (!sky2->tx_le)
		return 0;

	netif_info(sky2, ifdown, dev, "disabling interface\n");

	if (hw->ports == 1) {
		/* Last/only port: mask all IRQs and free the interrupt */
		sky2_write32(hw, B0_IMSK, 0);
		sky2_read32(hw, B0_IMSK);	/* flush posted write */

		napi_disable(&hw->napi);
		free_irq(hw->pdev->irq, hw);
		hw->flags &= ~SKY2_HW_IRQ_SETUP;
	} else {
		u32 imask;

		/* Disable port IRQ */
		imask  = sky2_read32(hw, B0_IMSK);
		imask &= ~portirq_msk[sky2->port];
		sky2_write32(hw, B0_IMSK, imask);
		sky2_read32(hw, B0_IMSK);

		/* Wait for in-flight IRQ/NAPI work to finish; the other
		 * port keeps the shared interrupt and NAPI context alive.
		 */
		synchronize_irq(hw->pdev->irq);
		napi_synchronize(&hw->napi);
	}

	sky2_hw_down(sky2);

	sky2_free_buffers(sky2);

	return 0;
}
2150
2151static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
2152{
2153	if (hw->flags & SKY2_HW_FIBRE_PHY)
2154		return SPEED_1000;
2155
2156	if (!(hw->flags & SKY2_HW_GIGABIT)) {
2157		if (aux & PHY_M_PS_SPEED_100)
2158			return SPEED_100;
2159		else
2160			return SPEED_10;
2161	}
2162
2163	switch (aux & PHY_M_PS_SPEED_MSK) {
2164	case PHY_M_PS_SPEED_1000:
2165		return SPEED_1000;
2166	case PHY_M_PS_SPEED_100:
2167		return SPEED_100;
2168	default:
2169		return SPEED_10;
2170	}
2171}
2172
/* Handle link-up: program inter-packet gap, enable Rx/Tx, unmask PHY
 * interrupts, report carrier, kick the watchdog and light the link LED.
 * Called under phy_lock from the PHY interrupt path.
 */
static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	static const char *fc_name[] = {
		[FC_NONE]	= "none",
		[FC_TX]		= "tx",
		[FC_RX]		= "rx",
		[FC_BOTH]	= "both",
	};

	sky2_set_ipg(sky2);

	sky2_enable_rx_tx(sky2);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);

	/* Restart the 1-second watchdog immediately */
	mod_timer(&hw->watchdog_timer, jiffies + 1);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	netif_info(sky2, link, sky2->netdev,
		   "Link is up at %d Mbps, %s duplex, flow control %s\n",
		   sky2->speed,
		   sky2->duplex == DUPLEX_FULL ? "full" : "half",
		   fc_name[sky2->flow_status]);
}
2204
/* Handle link-down: mask PHY interrupts, disable GMAC Rx/Tx, drop
 * carrier, turn off the link LED and re-initialize the PHY so it can
 * renegotiate.  Called under phy_lock.
 */
static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);

	netif_carrier_off(sky2->netdev);

	/* Turn off link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	netif_info(sky2, link, sky2->netdev, "Link is down\n");

	sky2_phy_init(hw, port);
}
2226
2227static enum flow_control sky2_flow(int rx, int tx)
2228{
2229	if (rx)
2230		return tx ? FC_BOTH : FC_RX;
2231	else
2232		return tx ? FC_TX : FC_NONE;
2233}
2234
/* Finish autonegotiation: read the resolved speed/duplex from the PHY
 * auxiliary status and derive the pause (flow control) result from the
 * advertisement and link-partner registers.
 *
 * Returns 0 on success, -1 on remote fault or unresolved speed/duplex.
 */
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 advert, lpa;

	advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
	lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
	if (lpa & PHY_M_AN_RF) {
		netdev_err(sky2->netdev, "remote fault\n");
		return -1;
	}

	if (!(aux & PHY_M_PS_SPDUP_RES)) {
		netdev_err(sky2->netdev, "speed/duplex mismatch\n");
		return -1;
	}

	sky2->speed = sky2_phy_speed(hw, aux);
	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	/* Since the pause result bits seem to be in different positions
	 * on different chips, look at the registers instead.
	 */
	if (hw->flags & SKY2_HW_FIBRE_PHY) {
		/* Remap the 1000BASE-X pause bits into the copper
		 * (ADVERTISE_PAUSE_*/LPA_PAUSE_*) positions so the common
		 * resolution logic below works for fiber too.
		 */
		advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
		lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);

		if (advert & ADVERTISE_1000XPAUSE)
			advert |= ADVERTISE_PAUSE_CAP;
		if (advert & ADVERTISE_1000XPSE_ASYM)
			advert |= ADVERTISE_PAUSE_ASYM;
		if (lpa & LPA_1000XPAUSE)
			lpa |= LPA_PAUSE_CAP;
		if (lpa & LPA_1000XPAUSE_ASYM)
			lpa |= LPA_PAUSE_ASYM;
	}

	/* Standard pause resolution from advertised vs partner bits */
	sky2->flow_status = FC_NONE;
	if (advert & ADVERTISE_PAUSE_CAP) {
		if (lpa & LPA_PAUSE_CAP)
			sky2->flow_status = FC_BOTH;
		else if (advert & ADVERTISE_PAUSE_ASYM)
			sky2->flow_status = FC_RX;
	} else if (advert & ADVERTISE_PAUSE_ASYM) {
		if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
			sky2->flow_status = FC_TX;
	}

	/* No flow control at half duplex below gigabit, except on EC-U/EX */
	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
	    !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
		sky2->flow_status = FC_NONE;

	if (sky2->flow_status & FC_TX)
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
	else
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	return 0;
}
2296
/* Interrupt from PHY: read and clear the PHY interrupt status and
 * dispatch autoneg-complete, speed/duplex and link-state changes.
 */
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u16 istatus, phystat;

	if (!netif_running(dev))
		return;

	spin_lock(&sky2->phy_lock);
	/* Reading PHY_MARV_INT_STAT also acknowledges the PHY interrupt */
	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
		   istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		/* Only report link up once; carrier may already be on */
		if (sky2_autoneg_done(sky2, phystat) == 0 &&
		    !netif_carrier_ok(dev))
			sky2_link_up(sky2);
		goto out;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		sky2->speed = sky2_phy_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		sky2->duplex =
		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			sky2_link_up(sky2);
		else
			sky2_link_down(sky2);
	}
out:
	spin_unlock(&sky2->phy_lock);
}
2337
/* Special quick link interrupt (Yukon-2 Optima only): mask the quick
 * link IRQ, reset PHY link detect via PCI config space, and treat the
 * event as link up on port 0.
 */
static void sky2_qlink_intr(struct sky2_hw *hw)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
	u32 imask;
	u16 phy;

	/* disable irq */
	imask = sky2_read32(hw, B0_IMSK);
	imask &= ~Y2_IS_PHY_QLNK;
	sky2_write32(hw, B0_IMSK, imask);

	/* reset PHY Link Detect (config space is write-protected,
	 * so temporarily enable test-mode writes)
	 */
	phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	sky2_link_up(sky2);
}
2358
/* Transmit timeout is only called if we are running, carrier is up
 * and tx queue is full (stopped).
 */
static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	netif_err(sky2, timer, dev, "tx timeout\n");

	/* Dump software ring indices vs hardware report/done indices
	 * to help diagnose where the transmitter stalled.
	 */
	netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
		      sky2->tx_cons, sky2->tx_prod,
		      sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
		      sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));

	/* can't restart safely under softirq */
	schedule_work(&hw->restart_work);
}
2377
/* ndo_change_mtu: if the interface is running, quiesce interrupts,
 * NAPI and the receiver, update the MTU and GMAC serial mode, then
 * re-allocate receive buffers and restart.  Closes the device if the
 * new receive buffers cannot be allocated.
 */
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	int err;
	u16 ctl, mode;
	u32 imask;

	/* Not running: just record the new MTU, nothing to restart */
	if (!netif_running(dev)) {
		WRITE_ONCE(dev->mtu, new_mtu);
		netdev_update_features(dev);
		return 0;
	}

	/* Mask all interrupts while reconfiguring; restored below */
	imask = sky2_read32(hw, B0_IMSK);
	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);

	netif_trans_update(dev);	/* prevent tx timeout */
	napi_disable(&hw->napi);
	netif_tx_disable(dev);

	synchronize_irq(hw->pdev->irq);

	if (!(hw->flags & SKY2_HW_RAM_BUFFER))
		sky2_set_tx_stfwd(hw, port);

	/* Stop the receiver and drop all queued receive buffers */
	ctl = gma_read16(hw, port, GM_GP_CTRL);
	gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
	sky2_rx_stop(sky2);
	sky2_rx_clean(sky2);

	WRITE_ONCE(dev->mtu, new_mtu);
	netdev_update_features(dev);

	/* Rebuild GMAC serial mode: blinding, IPG by speed, jumbo bit */
	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |	GM_SMOD_VLAN_ENA;
	if (sky2->speed > SPEED_100)
		mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
	else
		mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);

	if (dev->mtu > ETH_DATA_LEN)
		mode |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, mode);

	sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);

	err = sky2_alloc_rx_skbs(sky2);
	if (!err)
		sky2_rx_start(sky2);
	else
		sky2_rx_clean(sky2);
	sky2_write32(hw, B0_IMSK, imask);

	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);

	if (err)
		dev_close(dev);
	else {
		/* Restore GMAC control (re-enables the receiver) */
		gma_write16(hw, port, GM_GP_CTRL, ctl);

		netif_wake_queue(dev);
	}

	return err;
}
2447
/* Decide whether a received frame should be copied into a fresh skb:
 * either the frame is shorter than copybreak, or (on architectures
 * without efficient unaligned access) the IP header would otherwise
 * be misaligned.
 */
static inline bool needs_copy(const struct rx_ring_info *re,
			      unsigned length)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Some architectures need the IP header to be aligned */
	if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
		return true;
#endif
	return length < copybreak;
}
2458
/* For small frames, copy the data into a new skb and reuse the existing
 * ring skb for the next receive.  Offload metadata (checksum, hash,
 * VLAN tag) is transferred to the copy and cleared on the recycled skb.
 * Returns the new skb, or NULL on allocation failure.
 */
static struct sk_buff *receive_copy(struct sky2_port *sky2,
				    const struct rx_ring_info *re,
				    unsigned length)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
	if (likely(skb)) {
		/* Sync buffer for CPU access before touching the data */
		dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr,
					length, DMA_FROM_DEVICE);
		skb_copy_from_linear_data(re->skb, skb->data, length);
		skb->ip_summed = re->skb->ip_summed;
		skb->csum = re->skb->csum;
		skb_copy_hash(skb, re->skb);
		__vlan_hwaccel_copy_tag(skb, re->skb);

		/* Hand the buffer back to the device for reuse */
		dma_sync_single_for_device(&sky2->hw->pdev->dev,
					   re->data_addr, length,
					   DMA_FROM_DEVICE);
		/* Reset offload state on the recycled ring skb */
		__vlan_hwaccel_clear_tag(re->skb);
		skb_clear_hash(re->skb);
		re->skb->ip_summed = CHECKSUM_NONE;
		skb_put(skb, length);
	}
	return skb;
}
2486
/* Adjust length of skb with fragments to match received data.
 * The header area absorbs up to hdr_space bytes; remaining length is
 * distributed page-by-page over the fragments, and unused trailing
 * pages are released.
 */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			__skb_frag_unref(frag, false);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			skb_frag_size_set(frag, size);
			skb->data_len += size;
			/* truesize accounts for the whole page, not just
			 * the bytes used
			 */
			skb->truesize += PAGE_SIZE;
			skb->len += size;
			length -= size;
		}
	}
}
2519
/* Normal packet - take skb from ring element and put in a new one.
 * The replacement skb is allocated and DMA-mapped before the old one
 * is detached, so the ring entry always holds a valid buffer.
 * Returns the received skb, or NULL if a replacement could not be set up.
 */
static struct sk_buff *receive_new(struct sky2_port *sky2,
				   struct rx_ring_info *re,
				   unsigned int length)
{
	struct sk_buff *skb;
	struct rx_ring_info nre;
	unsigned hdr_space = sky2->rx_data_size;

	nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
	if (unlikely(!nre.skb))
		goto nobuf;

	if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
		goto nomap;

	/* Swap: old skb goes up the stack, new one takes its ring slot */
	skb = re->skb;
	sky2_rx_unmap_skb(sky2->hw->pdev, re);
	prefetch(skb->data);
	*re = nre;

	if (skb_shinfo(skb)->nr_frags)
		skb_put_frags(skb, hdr_space, length);
	else
		skb_put(skb, length);
	return skb;

nomap:
	dev_kfree_skb(nre.skb);
nobuf:
	return NULL;
}
2552
2553/*
2554 * Receive one packet.
2555 * For larger packets, get new buffer.
2556 */
2557static struct sk_buff *sky2_receive(struct net_device *dev,
2558				    u16 length, u32 status)
2559{
2560	struct sky2_port *sky2 = netdev_priv(dev);
2561	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
2562	struct sk_buff *skb = NULL;
2563	u16 count = (status & GMR_FS_LEN) >> 16;
2564
2565	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2566		     "rx slot %u status 0x%x len %d\n",
2567		     sky2->rx_next, status, length);
2568
2569	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2570	prefetch(sky2->rx_ring + sky2->rx_next);
2571
2572	if (skb_vlan_tag_present(re->skb))
2573		count -= VLAN_HLEN;	/* Account for vlan tag */
2574
2575	/* This chip has hardware problems that generates bogus status.
2576	 * So do only marginal checking and expect higher level protocols
2577	 * to handle crap frames.
2578	 */
2579	if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2580	    sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2581	    length != count)
2582		goto okay;
2583
2584	if (status & GMR_FS_ANY_ERR)
2585		goto error;
2586
2587	if (!(status & GMR_FS_RX_OK))
2588		goto resubmit;
2589
2590	/* if length reported by DMA does not match PHY, packet was truncated */
2591	if (length != count)
2592		goto error;
2593
2594okay:
2595	if (needs_copy(re, length))
2596		skb = receive_copy(sky2, re, length);
2597	else
2598		skb = receive_new(sky2, re, length);
2599
2600	dev->stats.rx_dropped += (skb == NULL);
2601
2602resubmit:
2603	sky2_rx_submit(sky2, re);
2604
2605	return skb;
2606
2607error:
2608	++dev->stats.rx_errors;
2609
2610	if (net_ratelimit())
2611		netif_info(sky2, rx_err, dev,
2612			   "rx error, status 0x%x length %d\n", status, length);
2613
2614	goto resubmit;
2615}
2616
2617/* Transmit complete */
2618static inline void sky2_tx_done(struct net_device *dev, u16 last)
2619{
2620	struct sky2_port *sky2 = netdev_priv(dev);
2621
2622	if (netif_running(dev)) {
2623		sky2_tx_complete(sky2, last);
2624
2625		/* Wake unless it's detached, and called e.g. from sky2_close() */
2626		if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2627			netif_wake_queue(dev);
2628	}
2629}
2630
2631static inline void sky2_skb_rx(const struct sky2_port *sky2,
2632			       struct sk_buff *skb)
2633{
2634	if (skb->ip_summed == CHECKSUM_NONE)
2635		netif_receive_skb(skb);
2636	else
2637		napi_gro_receive(&sky2->hw->napi, skb);
2638}
2639
2640static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2641				unsigned packets, unsigned bytes)
2642{
2643	struct net_device *dev = hw->dev[port];
2644	struct sky2_port *sky2 = netdev_priv(dev);
2645
2646	if (packets == 0)
2647		return;
2648
2649	u64_stats_update_begin(&sky2->rx_stats.syncp);
2650	sky2->rx_stats.packets += packets;
2651	sky2->rx_stats.bytes += bytes;
2652	u64_stats_update_end(&sky2->rx_stats.syncp);
2653
2654	sky2->last_rx = jiffies;
2655	sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
2656}
2657
/* Process an OP_RXCHKS status element (old-style checksum reporting):
 * the status word carries two hardware checksum counters that must
 * agree; a mismatch means hardware checksumming is unreliable, so
 * disable it.
 */
static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
{
	/* If this happens then driver assuming wrong format for chip type */
	BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);

	/* Both checksum counters are programmed to start at
	 * the same offset, so unless there is a problem they
	 * should match. This failure is an early indication that
	 * hardware receive checksumming won't work.
	 */
	if (likely((u16)(status >> 16) == (u16)status)) {
		struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = le16_to_cpu(status);
	} else {
		dev_notice(&sky2->hw->pdev->dev,
			   "%s: receive checksum problem (status = %#x)\n",
			   sky2->netdev->name, status);

		/* Disable checksum offload
		 * It will be reenabled on next ndo_set_features, but if it's
		 * really broken, will get disabled again
		 */
		sky2->netdev->features &= ~NETIF_F_RXCSUM;
		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
			     BMU_DIS_RX_CHKSUM);
	}
}
2686
/* Process an OP_RXVLAN status element: attach the hardware-stripped
 * VLAN tag (carried in the length field) to the pending receive skb.
 */
static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
{
	struct sk_buff *skb;

	skb = sky2->rx_ring[sky2->rx_next].skb;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
}
2694
/* Process an OP_RSS_HASH status element: record the hardware RSS hash
 * on the pending receive skb.
 */
static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
{
	struct sk_buff *skb;

	skb = sky2->rx_ring[sky2->rx_next].skb;
	skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
}
2702
/* Process status response ring: walk hardware-owned status elements up
 * to "idx" (or the NAPI budget "to_do") and dispatch by opcode — receive
 * completions, VLAN/checksum/hash metadata, and transmit-done indices.
 * Returns the number of received packets processed (NAPI work done).
 */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
	int work_done = 0;
	/* Per-port receive totals, flushed in sky2_rx_done() at the end */
	unsigned int total_bytes[2] = { 0 };
	unsigned int total_packets[2] = { 0 };

	if (to_do <= 0)
		return work_done;

	/* Read status elements only after seeing the hardware index */
	rmb();
	do {
		struct sky2_port *sky2;
		struct sky2_status_le *le  = hw->st_le + hw->st_idx;
		unsigned port;
		struct net_device *dev;
		struct sk_buff *skb;
		u32 status;
		u16 length;
		u8 opcode = le->opcode;

		/* Element still owned by software: nothing more to do */
		if (!(opcode & HW_OWNER))
			break;

		hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);

		/* CSS_LINK_BIT selects which port the element belongs to */
		port = le->css & CSS_LINK_BIT;
		dev = hw->dev[port];
		sky2 = netdev_priv(dev);
		length = le16_to_cpu(le->length);
		status = le32_to_cpu(le->status);

		/* Return ownership to software */
		le->opcode = 0;
		switch (opcode & ~HW_OWNER) {
		case OP_RXSTAT:
			total_packets[port]++;
			total_bytes[port] += length;

			skb = sky2_receive(dev, length, status);
			if (!skb)
				break;

			/* This chip reports checksum status differently */
			if (hw->flags & SKY2_HW_NEW_LE) {
				if ((dev->features & NETIF_F_RXCSUM) &&
				    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
				    (le->css & CSS_TCPUDPCSOK))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;
			}

			skb->protocol = eth_type_trans(skb, dev);
			sky2_skb_rx(sky2, skb);

			/* Stop after net poll weight */
			if (++work_done >= to_do)
				goto exit_loop;
			break;

		case OP_RXVLAN:
			sky2_rx_tag(sky2, length);
			break;

		case OP_RXCHKSVLAN:
			sky2_rx_tag(sky2, length);
			fallthrough;
		case OP_RXCHKS:
			if (likely(dev->features & NETIF_F_RXCSUM))
				sky2_rx_checksum(sky2, status);
			break;

		case OP_RSS_HASH:
			sky2_rx_hash(sky2, status);
			break;

		case OP_TXINDEXLE:
			/* TX index reports status for both ports */
			sky2_tx_done(hw->dev[0], status & 0xfff);
			if (hw->dev[1])
				sky2_tx_done(hw->dev[1],
				     ((status >> 24) & 0xff)
					     | (u16)(length & 0xf) << 8);
			break;

		default:
			if (net_ratelimit())
				pr_warn("unknown status opcode 0x%x\n", opcode);
		}
	} while (hw->st_idx != idx);

	/* Fully processed status ring so clear irq */
	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);

exit_loop:
	sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
	sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);

	return work_done;
}
2803
/* Handle per-port hardware error bits: log each condition (rate
 * limited) and write the corresponding clear-IRQ register so the
 * error does not keep firing.
 */
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

	if (net_ratelimit())
		netdev_info(dev, "hw error interrupt status 0x%x\n", status);

	if (status & Y2_IS_PAR_RD1) {
		if (net_ratelimit())
			netdev_err(dev, "ram data read parity error\n");
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
		if (net_ratelimit())
			netdev_err(dev, "ram data write parity error\n");

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
		if (net_ratelimit())
			netdev_err(dev, "MAC parity error\n");
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
		if (net_ratelimit())
			netdev_err(dev, "RX parity error\n");
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
		if (net_ratelimit())
			netdev_err(dev, "TCP segmentation error\n");
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
2843
/* Handle chip-level hardware error interrupts: timestamp overflow,
 * PCI/PCI-Express errors, then per-port errors (port 1's bits are the
 * same layout shifted up by 8).
 */
static void sky2_hw_intr(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u32 status = sky2_read32(hw, B0_HWE_ISRC);
	u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);

	/* Only look at sources we have unmasked */
	status &= hwmsk;

	if (status & Y2_IS_TIST_OV)
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
		u16 pci_err;

		/* Config space writes need test mode enabled */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_err = sky2_pci_read16(hw, PCI_STATUS);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
			        pci_err);

		/* Write back error bits to clear them */
		sky2_pci_write16(hw, PCI_STATUS,
				      pci_err | PCI_STATUS_ERROR_BITS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
		/* PCI-Express uncorrectable Error occurred */
		u32 err;

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);

		sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}
2890
/* Handle GMAC interrupts for one port: counter overflows are cleared
 * by reading the IRQ source registers; FIFO overrun/underrun update
 * error statistics and clear the FIFO condition.
 */
static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);

	/* Reads acknowledge the counter-overflow interrupts */
	if (status & GM_IS_RX_CO_OV)
		gma_read16(hw, port, GM_RX_IRQ_SRC);

	if (status & GM_IS_TX_CO_OV)
		gma_read16(hw, port, GM_TX_IRQ_SRC);

	if (status & GM_IS_RX_FF_OR) {
		++dev->stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++dev->stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}
2915
/* This should never happen it is a bug.
 * A descriptor (list element) check error on queue q: dump the
 * prefetch unit get/put indices and clear the interrupt.
 */
static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
{
	struct net_device *dev = hw->dev[port];
	u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));

	dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
		dev->name, (unsigned) q, (unsigned) idx,
		(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));

	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
}
2928
/* Detect a hung receiver by sampling MAC and queue FIFO read pointers
 * and fill levels.  Returns 1 if, since the last sample, no frames were
 * received while a FIFO has data but its read pointer did not advance;
 * otherwise stores the new sample and returns 0.
 */
static int sky2_rx_hung(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned rxq = rxqaddr[port];
	u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
	u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));

	/* If idle and MAC or PCI is stuck */
	if (sky2->check.last == sky2->last_rx &&
	    ((mac_rp == sky2->check.mac_rp &&
	      mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
	     /* Check if the PCI RX hang */
	     (fifo_rp == sky2->check.fifo_rp &&
	      fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
		netdev_printk(KERN_DEBUG, dev,
			      "hung mac %d:%d fifo %d (%d:%d)\n",
			      mac_lev, mac_rp, fifo_lev,
			      fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
		return 1;
	} else {
		/* Remember current state for the next check */
		sky2->check.last = sky2->last_rx;
		sky2->check.mac_rp = mac_rp;
		sky2->check.mac_lev = mac_lev;
		sky2->check.fifo_rp = fifo_rp;
		sky2->check.fifo_lev = fifo_lev;
		return 0;
	}
}
2961
/* Periodic watchdog: recover lost interrupts by kicking NAPI when an
 * IRQ source is pending, and (on RAM-buffer chips) detect a hung
 * receiver and schedule a restart.  Rearms itself while any port is up.
 */
static void sky2_watchdog(struct timer_list *t)
{
	struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);

	/* Check for lost IRQ once a second */
	if (sky2_read32(hw, B0_ISRC)) {
		napi_schedule(&hw->napi);
	} else {
		int i, active = 0;

		for (i = 0; i < hw->ports; i++) {
			struct net_device *dev = hw->dev[i];
			if (!netif_running(dev))
				continue;
			++active;

			/* For chips with Rx FIFO, check if stuck */
			if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
			     sky2_rx_hung(dev)) {
				netdev_info(dev, "receiver hang detected\n");
				schedule_work(&hw->restart_work);
				/* restart_work will rearm as needed */
				return;
			}
		}

		/* Stop rearming when no port is running */
		if (active == 0)
			return;
	}

	mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
}
2993
/* Hardware/software error handling: fan out the error summary bits to
 * the chip-level, per-MAC and per-queue descriptor error handlers.
 */
static void sky2_err_intr(struct sky2_hw *hw, u32 status)
{
	if (net_ratelimit())
		dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);

	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);

	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);

	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);

	if (status & Y2_IS_CHK_RX1)
		sky2_le_error(hw, 0, Q_R1);

	if (status & Y2_IS_CHK_RX2)
		sky2_le_error(hw, 1, Q_R2);

	if (status & Y2_IS_CHK_TXA1)
		sky2_le_error(hw, 0, Q_XA1);

	if (status & Y2_IS_CHK_TXA2)
		sky2_le_error(hw, 1, Q_XA2);
}
3021
/* NAPI poll handler: dispatch error/PHY/quick-link events, then drain
 * the status ring until it is empty or the budget is exhausted.  Only
 * re-enables interrupts (via napi_complete_done + LISR read) when the
 * ring was fully drained within budget.
 */
static int sky2_poll(struct napi_struct *napi, int work_limit)
{
	struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
	int work_done = 0;
	u16 idx;

	if (unlikely(status & Y2_IS_ERROR))
		sky2_err_intr(hw, status);

	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);

	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);

	if (status & Y2_IS_PHY_QLNK)
		sky2_qlink_intr(hw);

	/* Re-read the hardware put index each pass: new status elements
	 * may have arrived while we were processing.
	 */
	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
		work_done += sky2_status_intr(hw, work_limit - work_done, idx);

		if (work_done >= work_limit)
			goto done;
	}

	napi_complete_done(napi, work_done);
	sky2_read32(hw, B0_Y2_SP_LISR);
done:

	return work_done;
}
3054
/* Interrupt handler: acknowledge/mask the interrupt and hand the real
 * work to NAPI.  Both ports share this one handler.
 */
static irqreturn_t sky2_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	u32 status;

	/* Reading this mask interrupts as side effect */
	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	/* 0: not our interrupt; ~0: likely a detached/dead PCI device */
	if (status == 0 || status == ~0) {
		sky2_write32(hw, B0_Y2_SP_ICR, 2);
		return IRQ_NONE;
	}

	/* Warm the cache line the poll loop will read first */
	prefetch(&hw->st_le[hw->st_idx]);

	napi_schedule(&hw->napi);

	return IRQ_HANDLED;
}
3073
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netconsole and friends: just kick NAPI */
static void sky2_netpoll(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	napi_schedule(&sky2->hw->napi);
}
#endif
3082
3083/* Chip internal frequency for clock calculations */
3084static u32 sky2_mhz(const struct sky2_hw *hw)
3085{
3086	switch (hw->chip_id) {
3087	case CHIP_ID_YUKON_EC:
3088	case CHIP_ID_YUKON_EC_U:
3089	case CHIP_ID_YUKON_EX:
3090	case CHIP_ID_YUKON_SUPR:
3091	case CHIP_ID_YUKON_UL_2:
3092	case CHIP_ID_YUKON_OPT:
3093	case CHIP_ID_YUKON_PRM:
3094	case CHIP_ID_YUKON_OP_2:
3095		return 125;
3096
3097	case CHIP_ID_YUKON_FE:
3098		return 100;
3099
3100	case CHIP_ID_YUKON_FE_P:
3101		return 50;
3102
3103	case CHIP_ID_YUKON_XL:
3104		return 156;
3105
3106	default:
3107		BUG();
3108	}
3109}
3110
3111static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
3112{
3113	return sky2_mhz(hw) * us;
3114}
3115
3116static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
3117{
3118	return clk / sky2_mhz(hw);
3119}
3120
3121
/* Identify the chip and derive its capability flags and port count.
 * Returns 0 on success or -EOPNOTSUPP for chip types/revisions the
 * driver does not handle.
 */
static int sky2_init(struct sky2_hw *hw)
{
	u8 t8;

	/* Enable all clocks and check for bad PCI access */
	sky2_pci_write32(hw, PCI_DEV_REG3, 0);

	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;

	/* Per-chip feature flags; see the SKY2_HW_* definitions */
	switch (hw->chip_id) {
	case CHIP_ID_YUKON_XL:
		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
		if (hw->chip_rev < CHIP_REV_YU_XL_A2)
			hw->flags |= SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_EC_U:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_EX:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_ADV_POWER_CTL
			| SKY2_HW_RSS_CHKSUM;

		/* New transmit checksum */
		if (hw->chip_rev != CHIP_REV_YU_EX_B0)
			hw->flags |= SKY2_HW_AUTO_TX_SUM;
		break;

	case CHIP_ID_YUKON_EC:
		/* This rev is really old, and requires untested workarounds */
		if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
			return -EOPNOTSUPP;
		}
		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_FE:
		hw->flags = SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_FE_P:
		hw->flags = SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;

		/* The workaround for status conflicts VLAN tag detection. */
		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
			hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
		break;

	case CHIP_ID_YUKON_SUPR:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;

		if (hw->chip_rev == CHIP_REV_YU_SU_A0)
			hw->flags |= SKY2_HW_RSS_CHKSUM;
		break;

	case CHIP_ID_YUKON_UL_2:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_OPT:
	case CHIP_ID_YUKON_PRM:
	case CHIP_ID_YUKON_OP_2:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEW_LE
			| SKY2_HW_ADV_POWER_CTL;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	/* PMD types 'L', 'S' and 'P' are fibre media */
	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
	if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
		hw->flags |= SKY2_HW_FIBRE_PHY;

	/* A second port only counts if the dual-MAC bits are set and
	 * link 2 is not flagged inactive.
	 */
	hw->ports = 1;
	t8 = sky2_read8(hw, B2_Y2_HW_RES);
	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			++hw->ports;
	}

	/* A non-zero value here enables the RAM buffer code paths */
	if (sky2_read8(hw, B2_E_0))
		hw->flags |= SKY2_HW_RAM_BUFFER;

	return 0;
}
3229
/* Reset the chip and reprogram all state shared by both ports:
 * ASF/firmware disable, PCI error state, per-chip workarounds, RAM
 * interface timeouts, transmit arbiters and the status ring.
 * The register sequence and its ordering follow the vendor driver.
 */
static void sky2_reset(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 status;
	int i;
	u32 hwe_mask = Y2_HWE_ALL_MASK;

	/* disable ASF */
	if (hw->chip_id == CHIP_ID_YUKON_EX
	    || hw->chip_id == CHIP_ID_YUKON_SUPR) {
		sky2_write32(hw, CPU_WDOG, 0);
		status = sky2_read16(hw, HCU_CCSR);
		status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
			    HCU_CCSR_UC_STATE_MSK);
		/*
		 * CPU clock divider shouldn't be used because
		 * - ASF firmware may malfunction
		 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
		 */
		status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
		sky2_write16(hw, HCU_CCSR, status);
		sky2_write32(hw, CPU_WDOG, 0);
	} else
		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
	sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);

	/* do a SW reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	/* allow writes to PCI config */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* clear PCI errors, if any */
	status = sky2_pci_read16(hw, PCI_STATUS);
	status |= PCI_STATUS_ERROR_BITS;
	sky2_pci_write16(hw, PCI_STATUS, status);

	sky2_write8(hw, B0_CTST, CS_MRST_CLR);

	if (pci_is_pcie(pdev)) {
		/* clear all PCI Express uncorrectable error status bits */
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);

		/* If error bit is stuck on ignore it */
		if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
			dev_info(&pdev->dev, "ignoring stuck error report bit\n");
		else
			hwe_mask |= Y2_IS_PCI_EXP;
	}

	sky2_power_on(hw);
	/* re-protect PCI config space after the error cleanup above */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);

		if (hw->chip_id == CHIP_ID_YUKON_EX ||
		    hw->chip_id == CHIP_ID_YUKON_SUPR)
			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
				     | GMC_BYP_RETR_ON);

	}

	if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
		/* enable MACSec clock gating */
		sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
	}

	/* PEX PHY / link-detect workarounds for Optima-family chips */
	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
	    hw->chip_id == CHIP_ID_YUKON_PRM ||
	    hw->chip_id == CHIP_ID_YUKON_OP_2) {
		u16 reg;

		if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
			/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
			sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));

			/* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
			reg = 10;

			/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
			sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
		} else {
			/* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
			reg = 3;
		}

		reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
		reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;

		/* reset PHY Link Detect */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);

		/* check if PSMv2 was running before */
		reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
		if (reg & PCI_EXP_LNKCTL_ASPMC)
			/* restore the PCIe Link Control register */
			sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
					 reg);

		if (hw->chip_id == CHIP_ID_YUKON_PRM &&
			hw->chip_rev == CHIP_REV_YU_PRM_A0) {
			/* change PHY Interrupt polarity to low active */
			reg = sky2_read16(hw, GPHY_CTRL);
			sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);

			/* adapt HW for low active PHY Interrupt */
			reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
			sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
		}

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
		sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
	}

	/* Clear I2C IRQ noise */
	sky2_write32(hw, B2_I2C_IRQ, 1);

	/* turn off hardware timer (unused) */
	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling */
	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);

	/* Turn off receive timestamp */
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface: same timeout for every queue */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
	}

	sky2_write32(hw, B0_HWE_IMSK, hwe_mask);

	for (i = 0; i < hw->ports; i++)
		sky2_gmac_reset(hw, i);

	/* Reset and reprogram the status list the chip writes into */
	memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
	hw->st_idx = 0;

	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);

	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);

	/* Set the list last index */
	sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);

	sky2_write16(hw, STAT_TX_IDX_TH, 10);
	sky2_write8(hw, STAT_FIFO_WM, 16);

	/* set Status-FIFO ISR watermark */
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
		sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
	else
		sky2_write8(hw, STAT_FIFO_ISR_WM, 16);

	/* default interrupt moderation timers (see sky2_set_coalesce) */
	sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
	sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
	sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));

	/* enable status unit */
	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);

	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
}
3424
/* Take the device offline without informing upper layers of the
 * transition.  Equivalent to dev_stop(); undone by sky2_reattach().
 */
static void sky2_detach(struct net_device *dev)
{
	if (!netif_running(dev))
		return;

	/* Quiesce the transmit path before marking the device absent */
	netif_tx_lock(dev);
	netif_device_detach(dev);	/* stop txq */
	netif_tx_unlock(dev);

	sky2_close(dev);
}
3438
/* Bring the device back online after sky2_detach().
 * Returns 0 on success or the error from sky2_open(), in which case
 * the device is administratively closed.
 */
static int sky2_reattach(struct net_device *dev)
{
	int err;

	if (!netif_running(dev))
		return 0;

	err = sky2_open(dev);
	if (err) {
		netdev_info(dev, "could not restart %d\n", err);
		dev_close(dev);
		return err;
	}

	netif_device_attach(dev);
	sky2_set_multicast(dev);
	return 0;
}
3457
/* Quiesce both ports and the shared interrupt/NAPI machinery.
 * Counterpart of sky2_all_up(); used around a full chip reset.
 */
static void sky2_all_down(struct sky2_hw *hw)
{
	int i;

	if (hw->flags & SKY2_HW_IRQ_SETUP) {
		/* Mask all chip interrupts; the read-back flushes the
		 * posted write before we wait for in-flight handlers.
		 */
		sky2_write32(hw, B0_IMSK, 0);
		sky2_read32(hw, B0_IMSK);

		synchronize_irq(hw->pdev->irq);
		napi_disable(&hw->napi);
	}

	/* Take each running port down */
	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (!netif_running(dev))
			continue;

		netif_carrier_off(dev);
		netif_tx_disable(dev);
		sky2_hw_down(sky2);
	}
}
3482
/* Restart both ports and re-enable the shared interrupt/NAPI
 * machinery.  Counterpart of sky2_all_down().
 */
static void sky2_all_up(struct sky2_hw *hw)
{
	u32 imask = Y2_IS_BASE;
	int i;

	/* Bring each running port back up, accumulating its IRQ bits */
	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (!netif_running(dev))
			continue;

		sky2_hw_up(sky2);
		sky2_set_multicast(dev);
		imask |= portirq_msk[i];
		netif_wake_queue(dev);
	}

	if (hw->flags & SKY2_HW_IRQ_SETUP) {
		/* Unmask interrupts; reads are for their hardware side
		 * effects (results discarded).
		 */
		sky2_write32(hw, B0_IMSK, imask);
		sky2_read32(hw, B0_IMSK);
		sky2_read32(hw, B0_Y2_SP_LISR);
		napi_enable(&hw->napi);
	}
}
3508
/* Work handler for a deferred full restart (scheduled e.g. by the
 * watchdog after a detected receiver hang): take both ports down,
 * reset the chip, bring them back up, all under RTNL.
 */
static void sky2_restart(struct work_struct *work)
{
	struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);

	rtnl_lock();

	sky2_all_down(hw);
	sky2_reset(hw);
	sky2_all_up(hw);

	rtnl_unlock();
}
3521
3522static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3523{
3524	return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3525}
3526
/* ethtool get_wol: report supported and currently enabled WoL modes */
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	const struct sky2_port *sky2 = netdev_priv(dev);

	wol->supported = sky2_wol_supported(sky2->hw);
	wol->wolopts = sky2->wol;
}
3534
3535static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3536{
3537	struct sky2_port *sky2 = netdev_priv(dev);
3538	struct sky2_hw *hw = sky2->hw;
3539	bool enable_wakeup = false;
3540	int i;
3541
3542	if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
3543	    !device_can_wakeup(&hw->pdev->dev))
3544		return -EOPNOTSUPP;
3545
3546	sky2->wol = wol->wolopts;
3547
3548	for (i = 0; i < hw->ports; i++) {
3549		struct net_device *dev = hw->dev[i];
3550		struct sky2_port *sky2 = netdev_priv(dev);
3551
3552		if (sky2->wol)
3553			enable_wakeup = true;
3554	}
3555	device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
3556
3557	return 0;
3558}
3559
3560static u32 sky2_supported_modes(const struct sky2_hw *hw)
3561{
3562	if (sky2_is_copper(hw)) {
3563		u32 modes = SUPPORTED_10baseT_Half
3564			| SUPPORTED_10baseT_Full
3565			| SUPPORTED_100baseT_Half
3566			| SUPPORTED_100baseT_Full;
3567
3568		if (hw->flags & SKY2_HW_GIGABIT)
3569			modes |= SUPPORTED_1000baseT_Half
3570				| SUPPORTED_1000baseT_Full;
3571		return modes;
3572	} else
3573		return SUPPORTED_1000baseT_Half
3574			| SUPPORTED_1000baseT_Full;
3575}
3576
/* ethtool get_link_ksettings: report port type, speed, duplex,
 * autoneg state and the supported/advertised mode masks.
 */
static int sky2_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u32 supported, advertising;

	supported = sky2_supported_modes(hw);
	cmd->base.phy_address = PHY_ADDR_MARV;
	if (sky2_is_copper(hw)) {
		cmd->base.port = PORT_TP;
		cmd->base.speed = sky2->speed;
		supported |=  SUPPORTED_Autoneg | SUPPORTED_TP;
	} else {
		/* fibre runs at gigabit only */
		cmd->base.speed = SPEED_1000;
		cmd->base.port = PORT_FIBRE;
		supported |=  SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	}

	advertising = sky2->advertising;
	cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
		? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.duplex = sky2->duplex;

	/* convert the legacy u32 masks to link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
3608
3609static int sky2_set_link_ksettings(struct net_device *dev,
3610				   const struct ethtool_link_ksettings *cmd)
3611{
3612	struct sky2_port *sky2 = netdev_priv(dev);
3613	const struct sky2_hw *hw = sky2->hw;
3614	u32 supported = sky2_supported_modes(hw);
3615	u32 new_advertising;
3616
3617	ethtool_convert_link_mode_to_legacy_u32(&new_advertising,
3618						cmd->link_modes.advertising);
3619
3620	if (cmd->base.autoneg == AUTONEG_ENABLE) {
3621		if (new_advertising & ~supported)
3622			return -EINVAL;
3623
3624		if (sky2_is_copper(hw))
3625			sky2->advertising = new_advertising |
3626					    ADVERTISED_TP |
3627					    ADVERTISED_Autoneg;
3628		else
3629			sky2->advertising = new_advertising |
3630					    ADVERTISED_FIBRE |
3631					    ADVERTISED_Autoneg;
3632
3633		sky2->flags |= SKY2_FLAG_AUTO_SPEED;
3634		sky2->duplex = -1;
3635		sky2->speed = -1;
3636	} else {
3637		u32 setting;
3638		u32 speed = cmd->base.speed;
3639
3640		switch (speed) {
3641		case SPEED_1000:
3642			if (cmd->base.duplex == DUPLEX_FULL)
3643				setting = SUPPORTED_1000baseT_Full;
3644			else if (cmd->base.duplex == DUPLEX_HALF)
3645				setting = SUPPORTED_1000baseT_Half;
3646			else
3647				return -EINVAL;
3648			break;
3649		case SPEED_100:
3650			if (cmd->base.duplex == DUPLEX_FULL)
3651				setting = SUPPORTED_100baseT_Full;
3652			else if (cmd->base.duplex == DUPLEX_HALF)
3653				setting = SUPPORTED_100baseT_Half;
3654			else
3655				return -EINVAL;
3656			break;
3657
3658		case SPEED_10:
3659			if (cmd->base.duplex == DUPLEX_FULL)
3660				setting = SUPPORTED_10baseT_Full;
3661			else if (cmd->base.duplex == DUPLEX_HALF)
3662				setting = SUPPORTED_10baseT_Half;
3663			else
3664				return -EINVAL;
3665			break;
3666		default:
3667			return -EINVAL;
3668		}
3669
3670		if ((setting & supported) == 0)
3671			return -EINVAL;
3672
3673		sky2->speed = speed;
3674		sky2->duplex = cmd->base.duplex;
3675		sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
3676	}
3677
3678	if (netif_running(dev)) {
3679		sky2_phy_reinit(sky2);
3680		sky2_set_multicast(dev);
3681	}
3682
3683	return 0;
3684}
3685
/* ethtool get_drvinfo: driver name, version and PCI bus location */
static void sky2_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(sky2->hw->pdev),
		sizeof(info->bus_info));
}
3696
/* ethtool statistics: name -> GMAC hardware counter register offset.
 * The first two entries are 64-bit octet counters and are read
 * specially in sky2_phy_stats(); the table order therefore must stay
 * in sync with that function and with sky2_get_strings().
 */
static const struct sky2_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} sky2_stats[] = {
	{ "tx_bytes",	   GM_TXO_OK_HI },
	{ "rx_bytes",	   GM_RXO_OK_HI },
	{ "tx_broadcast",  GM_TXF_BC_OK },
	{ "rx_broadcast",  GM_RXF_BC_OK },
	{ "tx_multicast",  GM_TXF_MC_OK },
	{ "rx_multicast",  GM_RXF_MC_OK },
	{ "tx_unicast",    GM_TXF_UC_OK },
	{ "rx_unicast",    GM_RXF_UC_OK },
	{ "tx_mac_pause",  GM_TXF_MPAUSE },
	{ "rx_mac_pause",  GM_RXF_MPAUSE },
	{ "collisions",    GM_TXF_COL },
	{ "late_collision",GM_TXF_LAT_COL },
	{ "aborted", 	   GM_TXF_ABO_COL },
	{ "single_collisions", GM_TXF_SNG_COL },
	{ "multi_collisions", GM_TXF_MUL_COL },

	{ "rx_short",      GM_RXF_SHT },
	{ "rx_runt", 	   GM_RXE_FRAG },
	{ "rx_64_byte_packets", GM_RXF_64B },
	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
	{ "rx_128_to_255_byte_packets", GM_RXF_255B },
	{ "rx_256_to_511_byte_packets", GM_RXF_511B },
	{ "rx_512_to_1023_byte_packets", GM_RXF_1023B },
	{ "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
	{ "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
	{ "rx_too_long",   GM_RXF_LNG_ERR },
	{ "rx_fifo_overflow", GM_RXE_FIFO_OV },
	{ "rx_jabber",     GM_RXF_JAB_PKT },
	{ "rx_fcs_error",   GM_RXF_FCS_ERR },

	{ "tx_64_byte_packets", GM_TXF_64B },
	{ "tx_65_to_127_byte_packets", GM_TXF_127B },
	{ "tx_128_to_255_byte_packets", GM_TXF_255B },
	{ "tx_256_to_511_byte_packets", GM_TXF_511B },
	{ "tx_512_to_1023_byte_packets", GM_TXF_1023B },
	{ "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
	{ "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
	{ "tx_fifo_underrun", GM_TXE_FIFO_UR },
};
3740
3741static u32 sky2_get_msglevel(struct net_device *netdev)
3742{
3743	struct sky2_port *sky2 = netdev_priv(netdev);
3744	return sky2->msg_enable;
3745}
3746
3747static int sky2_nway_reset(struct net_device *dev)
3748{
3749	struct sky2_port *sky2 = netdev_priv(dev);
3750
3751	if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
3752		return -EINVAL;
3753
3754	sky2_phy_reinit(sky2);
3755	sky2_set_multicast(dev);
3756
3757	return 0;
3758}
3759
3760static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
3761{
3762	struct sky2_hw *hw = sky2->hw;
3763	unsigned port = sky2->port;
3764	int i;
3765
3766	data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
3767	data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
3768
3769	for (i = 2; i < count; i++)
3770		data[i] = get_stats32(hw, port, sky2_stats[i].offset);
3771}
3772
/* ethtool set_msglevel: update the netif debug message mask */
static void sky2_set_msglevel(struct net_device *netdev, u32 value)
{
	struct sky2_port *sky2 = netdev_priv(netdev);
	sky2->msg_enable = value;
}
3778
3779static int sky2_get_sset_count(struct net_device *dev, int sset)
3780{
3781	switch (sset) {
3782	case ETH_SS_STATS:
3783		return ARRAY_SIZE(sky2_stats);
3784	default:
3785		return -EOPNOTSUPP;
3786	}
3787}
3788
/* ethtool get_ethtool_stats: dump all counters from sky2_stats[] */
static void sky2_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 * data)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
}
3796
/* ethtool get_strings: copy the statistic names for ETH_SS_STATS;
 * other string sets are not supported and leave @data untouched.
 */
static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
			ethtool_puts(&data, sky2_stats[i].name);

		break;
	}
}
3808
/* Set a new MAC address: validate it, store it on the netdev and
 * program it into both station address register blocks and the GMAC
 * source address registers.  Returns 0 or -EADDRNOTAVAIL.
 */
static int sky2_set_mac_address(struct net_device *dev, void *p)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);
	/* each port has its own 8-byte-strided MAC register block */
	memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
		    dev->dev_addr, ETH_ALEN);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);

	return 0;
}
3833
3834static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3835{
3836	u32 bit;
3837
3838	bit = ether_crc(ETH_ALEN, addr) & 63;
3839	filter[bit >> 3] |= 1 << (bit & 7);
3840}
3841
/* Program the receive filter: promiscuous, all-multicast, or the
 * hashed multicast filter built from the device's multicast list.
 * When Rx flow control is active, the IEEE 802.3x pause multicast
 * address is always allowed through.
 */
static void sky2_set_multicast(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	struct netdev_hw_addr *ha;
	u16 reg;
	u8 filter[8];
	int rx_pause;
	/* 01:80:c2:00:00:01 — the 802.3x PAUSE destination address */
	static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };

	rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)	/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));	/* accept all multicast */
	else if (netdev_mc_empty(dev) && !rx_pause)
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		reg |= GM_RXCR_MCF_ENA;

		if (rx_pause)
			sky2_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			sky2_add_filter(filter, ha->addr);
	}

	/* write the 64-bit hash filter, 16 bits at a time */
	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16) filter[0] | ((u16) filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16) filter[2] | ((u16) filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16) filter[4] | ((u16) filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16) filter[6] | ((u16) filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
3886
/* ndo_get_stats64: combine the driver's u64_stats-protected byte and
 * packet counters with error counters read from GMAC hardware.
 */
static void sky2_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *stats)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned int start;
	u64 _bytes, _packets;

	/* seqcount retry loop for a consistent rx byte/packet snapshot */
	do {
		start = u64_stats_fetch_begin(&sky2->rx_stats.syncp);
		_bytes = sky2->rx_stats.bytes;
		_packets = sky2->rx_stats.packets;
	} while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start));

	stats->rx_packets = _packets;
	stats->rx_bytes = _bytes;

	/* same for the tx side */
	do {
		start = u64_stats_fetch_begin(&sky2->tx_stats.syncp);
		_bytes = sky2->tx_stats.bytes;
		_packets = sky2->tx_stats.packets;
	} while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start));

	stats->tx_packets = _packets;
	stats->tx_bytes = _bytes;

	/* error/multicast counters come straight from the hardware */
	stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
		+ get_stats32(hw, port, GM_RXF_BC_OK);

	stats->collisions = get_stats32(hw, port, GM_TXF_COL);

	stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
	stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
	stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
		+ get_stats32(hw, port, GM_RXE_FRAG);
	stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);

	stats->rx_dropped = dev->stats.rx_dropped;
	stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
	stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
}
3929
/* Can have one global because blinking is controlled by
 * ethtool and that is always under RTNL mutex
 */
/* Put the port LEDs into @mode.  Newer PHYs (EC-U/EX/SUPR) use the
 * paged LED control register; older ones use the LED override
 * register.  Serialized with other PHY access via phy_lock.
 */
static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;

	spin_lock_bh(&sky2->phy_lock);
	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
	    hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
		u16 pg;
		/* save current register page, switch to page 3 */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		switch (mode) {
		case MO_LED_OFF:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(8) |
				     PHY_M_LEDC_INIT_CTRL(8) |
				     PHY_M_LEDC_STA1_CTRL(8) |
				     PHY_M_LEDC_STA0_CTRL(8));
			break;
		case MO_LED_ON:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(9) |
				     PHY_M_LEDC_INIT_CTRL(9) |
				     PHY_M_LEDC_STA1_CTRL(9) |
				     PHY_M_LEDC_STA0_CTRL(9));
			break;
		case MO_LED_BLINK:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(0xa) |
				     PHY_M_LEDC_INIT_CTRL(0xa) |
				     PHY_M_LEDC_STA1_CTRL(0xa) |
				     PHY_M_LEDC_STA0_CTRL(0xa));
			break;
		case MO_LED_NORM:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(1) |
				     PHY_M_LEDC_INIT_CTRL(8) |
				     PHY_M_LEDC_STA1_CTRL(7) |
				     PHY_M_LEDC_STA0_CTRL(7));
		}

		/* restore original register page */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(mode) |
				     PHY_M_LED_MO_10(mode) |
				     PHY_M_LED_MO_100(mode) |
				     PHY_M_LED_MO_1000(mode) |
				     PHY_M_LED_MO_RX(mode) |
				     PHY_M_LED_MO_TX(mode));

	spin_unlock_bh(&sky2->phy_lock);
}
3988
3989/* blink LED's for finding board */
3990static int sky2_set_phys_id(struct net_device *dev,
3991			    enum ethtool_phys_id_state state)
3992{
3993	struct sky2_port *sky2 = netdev_priv(dev);
3994
3995	switch (state) {
3996	case ETHTOOL_ID_ACTIVE:
3997		return 1;	/* cycle on/off once per second */
3998	case ETHTOOL_ID_INACTIVE:
3999		sky2_led(sky2, MO_LED_NORM);
4000		break;
4001	case ETHTOOL_ID_ON:
4002		sky2_led(sky2, MO_LED_ON);
4003		break;
4004	case ETHTOOL_ID_OFF:
4005		sky2_led(sky2, MO_LED_OFF);
4006		break;
4007	}
4008
4009	return 0;
4010}
4011
4012static void sky2_get_pauseparam(struct net_device *dev,
4013				struct ethtool_pauseparam *ecmd)
4014{
4015	struct sky2_port *sky2 = netdev_priv(dev);
4016
4017	switch (sky2->flow_mode) {
4018	case FC_NONE:
4019		ecmd->tx_pause = ecmd->rx_pause = 0;
4020		break;
4021	case FC_TX:
4022		ecmd->tx_pause = 1, ecmd->rx_pause = 0;
4023		break;
4024	case FC_RX:
4025		ecmd->tx_pause = 0, ecmd->rx_pause = 1;
4026		break;
4027	case FC_BOTH:
4028		ecmd->tx_pause = ecmd->rx_pause = 1;
4029	}
4030
4031	ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
4032		? AUTONEG_ENABLE : AUTONEG_DISABLE;
4033}
4034
4035static int sky2_set_pauseparam(struct net_device *dev,
4036			       struct ethtool_pauseparam *ecmd)
4037{
4038	struct sky2_port *sky2 = netdev_priv(dev);
4039
4040	if (ecmd->autoneg == AUTONEG_ENABLE)
4041		sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
4042	else
4043		sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;
4044
4045	sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
4046
4047	if (netif_running(dev))
4048		sky2_phy_reinit(sky2);
4049
4050	return 0;
4051}
4052
/* ethtool get_coalesce: read the three moderation timers (tx, rx
 * level, rx irq) and their frame thresholds back from the chip.
 * A stopped timer is reported as 0 usecs.
 */
static int sky2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
		ecmd->tx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);

	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);

	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs_irq = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
	}

	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);

	return 0;
}
4088
/* Note: this affect both ports */
/* ethtool set_coalesce: validate the requested values against the
 * timer register range and ring sizes, then program each of the
 * three moderation timers (0 usecs stops a timer).
 */
static int sky2_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	/* largest usec value the 24-bit timer register can hold */
	const u32 tmax = sky2_clk2us(hw, 0x0ffffff);

	if (ecmd->tx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs_irq > tmax)
		return -EINVAL;

	if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
		return -EINVAL;

	if (ecmd->tx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_TX_TIMER_INI,
			     sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	}
	sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_LEV_TIMER_INI,
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs_irq == 0)
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_ISR_TIMER_INI,
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
	return 0;
}
4139
4140/*
4141 * Hardware is limited to min of 128 and max of 2048 for ring size
4142 * and  rounded up to next power of two
4143 * to avoid division in modulus calculation
4144 */
4145static unsigned long roundup_ring_size(unsigned long pending)
4146{
4147	return max(128ul, roundup_pow_of_two(pending+1));
4148}
4149
4150static void sky2_get_ringparam(struct net_device *dev,
4151			       struct ethtool_ringparam *ering,
4152			       struct kernel_ethtool_ringparam *kernel_ering,
4153			       struct netlink_ext_ack *extack)
4154{
4155	struct sky2_port *sky2 = netdev_priv(dev);
4156
4157	ering->rx_max_pending = RX_MAX_PENDING;
4158	ering->tx_max_pending = TX_MAX_PENDING;
4159
4160	ering->rx_pending = sky2->rx_pending;
4161	ering->tx_pending = sky2->tx_pending;
4162}
4163
4164static int sky2_set_ringparam(struct net_device *dev,
4165			      struct ethtool_ringparam *ering,
4166			      struct kernel_ethtool_ringparam *kernel_ering,
4167			      struct netlink_ext_ack *extack)
4168{
4169	struct sky2_port *sky2 = netdev_priv(dev);
4170
4171	if (ering->rx_pending > RX_MAX_PENDING ||
4172	    ering->rx_pending < 8 ||
4173	    ering->tx_pending < TX_MIN_PENDING ||
4174	    ering->tx_pending > TX_MAX_PENDING)
4175		return -EINVAL;
4176
4177	sky2_detach(dev);
4178
4179	sky2->rx_pending = ering->rx_pending;
4180	sky2->tx_pending = ering->tx_pending;
4181	sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
4182
4183	return sky2_reattach(dev);
4184}
4185
/* Size of the buffer sky2_get_regs() fills: the full 16 KiB BAR */
static int sky2_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}
4190
/* Decide whether the 128-byte register block @b may be read safely.
 * Returns 1 for blocks valid on all cards, (hw->ports > 1) for blocks
 * that only exist on dual-port chips, and 0 for reserved regions.
 */
static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
{
	/* This complicated switch statement is to make sure and
	 * only access regions that are unreserved.
	 * Some blocks are only valid on dual port cards.
	 */
	switch (b) {
	/* second port */
	case 5:		/* Tx Arbiter 2 */
	case 9:		/* RX2 */
	case 14 ... 15:	/* TX2 */
	case 17: case 19: /* Ram Buffer 2 */
	case 22 ... 23: /* Tx Ram Buffer 2 */
	case 25:	/* Rx MAC Fifo 2 */
	case 27:	/* Tx MAC Fifo 2 */
	case 31:	/* GPHY 2 */
	case 40 ... 47: /* Pattern Ram 2 */
	case 52: case 54: /* TCP Segmentation 2 */
	case 112 ... 116: /* GMAC 2 */
		return hw->ports > 1;

	case 0:		/* Control */
	case 2:		/* Mac address */
	case 4:		/* Tx Arbiter 1 */
	case 7:		/* PCI express reg */
	case 8:		/* RX1 */
	case 12 ... 13: /* TX1 */
	case 16: case 18:/* Rx Ram Buffer 1 */
	case 20 ... 21: /* Tx Ram Buffer 1 */
	case 24:	/* Rx MAC Fifo 1 */
	case 26:	/* Tx MAC Fifo 1 */
	case 28 ... 29: /* Descriptor and status unit */
	case 30:	/* GPHY 1*/
	case 32 ... 39: /* Pattern Ram 1 */
	case 48: case 50: /* TCP Segmentation 1 */
	case 56 ... 60:	/* PCI space */
	case 80 ... 84:	/* GMAC 1 */
		return 1;

	default:
		return 0;
	}
}
4234
4235/*
4236 * Returns copy of control register region
4237 * Note: ethtool_get_regs always provides full size (16k) buffer
4238 */
4239static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4240			  void *p)
4241{
4242	const struct sky2_port *sky2 = netdev_priv(dev);
4243	const void __iomem *io = sky2->hw->regs;
4244	unsigned int b;
4245
4246	regs->version = 1;
4247
4248	for (b = 0; b < 128; b++) {
4249		/* skip poisonous diagnostic ram region in block 3 */
4250		if (b == 3)
4251			memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
4252		else if (sky2_reg_access_ok(sky2->hw, b))
4253			memcpy_fromio(p, io, 128);
4254		else
4255			memset(p, 0, 128);
4256
4257		p += 128;
4258		io += 128;
4259	}
4260}
4261
4262static int sky2_get_eeprom_len(struct net_device *dev)
4263{
4264	struct sky2_port *sky2 = netdev_priv(dev);
4265	struct sky2_hw *hw = sky2->hw;
4266	u16 reg2;
4267
4268	reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
4269	return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
4270}
4271
4272static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4273			   u8 *data)
4274{
4275	struct sky2_port *sky2 = netdev_priv(dev);
4276	int rc;
4277
4278	eeprom->magic = SKY2_EEPROM_MAGIC;
4279	rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
4280			      data);
4281	if (rc < 0)
4282		return rc;
4283
4284	eeprom->len = rc;
4285
4286	return 0;
4287}
4288
4289static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4290			   u8 *data)
4291{
4292	struct sky2_port *sky2 = netdev_priv(dev);
4293	int rc;
4294
4295	if (eeprom->magic != SKY2_EEPROM_MAGIC)
4296		return -EINVAL;
4297
4298	rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
4299			       data);
4300
4301	return rc < 0 ? rc : 0;
4302}
4303
4304static netdev_features_t sky2_fix_features(struct net_device *dev,
4305	netdev_features_t features)
4306{
4307	const struct sky2_port *sky2 = netdev_priv(dev);
4308	const struct sky2_hw *hw = sky2->hw;
4309
4310	/* In order to do Jumbo packets on these chips, need to turn off the
4311	 * transmit store/forward. Therefore checksum offload won't work.
4312	 */
4313	if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
4314		netdev_info(dev, "checksum offload not possible with jumbo frames\n");
4315		features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_CSUM_MASK);
4316	}
4317
4318	/* Some hardware requires receive checksum for RSS to work. */
4319	if ( (features & NETIF_F_RXHASH) &&
4320	     !(features & NETIF_F_RXCSUM) &&
4321	     (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
4322		netdev_info(dev, "receive hashing forces receive checksum\n");
4323		features |= NETIF_F_RXCSUM;
4324	}
4325
4326	return features;
4327}
4328
4329static int sky2_set_features(struct net_device *dev, netdev_features_t features)
4330{
4331	struct sky2_port *sky2 = netdev_priv(dev);
4332	netdev_features_t changed = dev->features ^ features;
4333
4334	if ((changed & NETIF_F_RXCSUM) &&
4335	    !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
4336		sky2_write32(sky2->hw,
4337			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
4338			     (features & NETIF_F_RXCSUM)
4339			     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
4340	}
4341
4342	if (changed & NETIF_F_RXHASH)
4343		rx_set_rss(dev, features);
4344
4345	if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4346		sky2_vlan_mode(dev, features);
4347
4348	return 0;
4349}
4350
/* ethtool entry points, shared by both ports.
 * supported_coalesce_params restricts the coalesce fields the ethtool
 * core will accept to those sky2_set_coalesce() actually programs.
 */
static const struct ethtool_ops sky2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_RX_USECS_IRQ |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ,
	.get_drvinfo	= sky2_get_drvinfo,
	.get_wol	= sky2_get_wol,
	.set_wol	= sky2_set_wol,
	.get_msglevel	= sky2_get_msglevel,
	.set_msglevel	= sky2_set_msglevel,
	.nway_reset	= sky2_nway_reset,
	.get_regs_len	= sky2_get_regs_len,
	.get_regs	= sky2_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= sky2_get_eeprom_len,
	.get_eeprom	= sky2_get_eeprom,
	.set_eeprom	= sky2_set_eeprom,
	.get_strings	= sky2_get_strings,
	.get_coalesce	= sky2_get_coalesce,
	.set_coalesce	= sky2_set_coalesce,
	.get_ringparam	= sky2_get_ringparam,
	.set_ringparam	= sky2_set_ringparam,
	.get_pauseparam = sky2_get_pauseparam,
	.set_pauseparam = sky2_set_pauseparam,
	.set_phys_id	= sky2_set_phys_id,
	.get_sset_count = sky2_get_sset_count,
	.get_ethtool_stats = sky2_get_ethtool_stats,
	.get_link_ksettings = sky2_get_link_ksettings,
	.set_link_ksettings = sky2_set_link_ksettings,
};
4381
4382#ifdef CONFIG_SKY2_DEBUG
4383
4384static struct dentry *sky2_debug;
4385
/* debugfs dump: IRQ state, status ring contents, pending tx list
 * elements and the hardware rx prefetch indices for one port.
 */
static int sky2_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned idx, last;
	int sop;

	seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
		   sky2_read32(hw, B0_ISRC),
		   sky2_read32(hw, B0_IMSK),
		   sky2_read32(hw, B0_Y2_SP_ICR));

	if (!netif_running(dev)) {
		seq_puts(seq, "network not running\n");
		return 0;
	}

	/* stop NAPI polling so the rings stay stable while we walk them */
	napi_disable(&hw->napi);
	last = sky2_read16(hw, STAT_PUT_IDX);

	seq_printf(seq, "Status ring %u\n", hw->st_size);
	if (hw->st_idx == last)
		seq_puts(seq, "Status ring (empty)\n");
	else {
		seq_puts(seq, "Status ring\n");
		for (idx = hw->st_idx; idx != last && idx < hw->st_size;
		     idx = RING_NEXT(idx, hw->st_size)) {
			const struct sky2_status_le *le = hw->st_le + idx;
			seq_printf(seq, "[%d] %#x %d %#x\n",
				   idx, le->opcode, le->length, le->status);
		}
		seq_puts(seq, "\n");
	}

	seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
		   sky2->tx_cons, sky2->tx_prod,
		   sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
		   sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));

	/* Dump contents of tx ring */
	sop = 1;	/* start of packet: print the ring index once per packet */
	for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
		const struct sky2_tx_le *le = sky2->tx_le + idx;
		u32 a = le32_to_cpu(le->addr);

		if (sop)
			seq_printf(seq, "%u:", idx);
		sop = 0;

		/* decode each list element by opcode (ownership bit masked) */
		switch (le->opcode & ~HW_OWNER) {
		case OP_ADDR64:
			seq_printf(seq, " %#x:", a);
			break;
		case OP_LRGLEN:
			seq_printf(seq, " mtu=%d", a);
			break;
		case OP_VLAN:
			seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
			break;
		case OP_TCPLISW:
			seq_printf(seq, " csum=%#x", a);
			break;
		case OP_LARGESEND:
			seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_PACKET:
			seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_BUFFER:
			seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		default:
			seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
				   a, le16_to_cpu(le->length));
		}

		if (le->ctrl & EOP) {
			seq_putc(seq, '\n');
			sop = 1;
		}
	}

	seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));

	/* NOTE(review): this read of the last-interrupt-source register
	 * presumably re-arms interrupts before NAPI restarts — confirm
	 * against the interrupt handler path.
	 */
	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sky2_debug);
4481
4482/*
4483 * Use network device events to create/remove/rename
4484 * debugfs file entries
4485 */
4486static int sky2_device_event(struct notifier_block *unused,
4487			     unsigned long event, void *ptr)
4488{
4489	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4490	struct sky2_port *sky2 = netdev_priv(dev);
4491
4492	if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
4493		return NOTIFY_DONE;
4494
4495	switch (event) {
4496	case NETDEV_CHANGENAME:
4497		if (sky2->debugfs) {
4498			sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
4499						       sky2_debug, dev->name);
4500		}
4501		break;
4502
4503	case NETDEV_GOING_DOWN:
4504		if (sky2->debugfs) {
4505			netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
4506			debugfs_remove(sky2->debugfs);
4507			sky2->debugfs = NULL;
4508		}
4509		break;
4510
4511	case NETDEV_UP:
4512		sky2->debugfs = debugfs_create_file(dev->name, 0444,
4513						    sky2_debug, dev,
4514						    &sky2_debug_fops);
4515		if (IS_ERR(sky2->debugfs))
4516			sky2->debugfs = NULL;
4517	}
4518
4519	return NOTIFY_DONE;
4520}
4521
/* netdev notifier registered by sky2_debug_init(); routes device
 * up/down/rename events to sky2_device_event().
 */
static struct notifier_block sky2_notifier = {
	.notifier_call = sky2_device_event,
};
4525
4526
4527static __init void sky2_debug_init(void)
4528{
4529	struct dentry *ent;
4530
4531	ent = debugfs_create_dir("sky2", NULL);
4532	if (IS_ERR(ent))
4533		return;
4534
4535	sky2_debug = ent;
4536	register_netdevice_notifier(&sky2_notifier);
4537}
4538
4539static __exit void sky2_debug_cleanup(void)
4540{
4541	if (sky2_debug) {
4542		unregister_netdevice_notifier(&sky2_notifier);
4543		debugfs_remove(sky2_debug);
4544		sky2_debug = NULL;
4545	}
4546}
4547
4548#else
4549#define sky2_debug_init()
4550#define sky2_debug_cleanup()
4551#endif
4552
/* Two copies of network device operations to handle special case of
 * not allowing netpoll on second port
 */
static const struct net_device_ops sky2_netdev_ops[2] = {
  /* port 0: full operations including the poll controller */
  {
	.ndo_open		= sky2_open,
	.ndo_stop		= sky2_close,
	.ndo_start_xmit		= sky2_xmit_frame,
	.ndo_eth_ioctl		= sky2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= sky2_set_mac_address,
	.ndo_set_rx_mode	= sky2_set_multicast,
	.ndo_change_mtu		= sky2_change_mtu,
	.ndo_fix_features	= sky2_fix_features,
	.ndo_set_features	= sky2_set_features,
	.ndo_tx_timeout		= sky2_tx_timeout,
	.ndo_get_stats64	= sky2_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sky2_netpoll,
#endif
  },
  /* port 1: identical except there is no ndo_poll_controller */
  {
	.ndo_open		= sky2_open,
	.ndo_stop		= sky2_close,
	.ndo_start_xmit		= sky2_xmit_frame,
	.ndo_eth_ioctl		= sky2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= sky2_set_mac_address,
	.ndo_set_rx_mode	= sky2_set_multicast,
	.ndo_change_mtu		= sky2_change_mtu,
	.ndo_fix_features	= sky2_fix_features,
	.ndo_set_features	= sky2_set_features,
	.ndo_tx_timeout		= sky2_tx_timeout,
	.ndo_get_stats64	= sky2_get_stats,
  },
};
4589
/* Initialize network device
 *
 * Allocates and fills in one netdev for @port: default link settings,
 * ring sizes, offload feature flags, MTU limits and MAC address.
 * Stores the netdev in hw->dev[port]. Returns NULL on allocation
 * failure; the caller registers the device.
 */
static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
					   int highmem, int wol)
{
	struct sky2_port *sky2;
	struct net_device *dev = alloc_etherdev(sizeof(*sky2));
	int ret;

	if (!dev)
		return NULL;

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->irq = hw->pdev->irq;
	dev->ethtool_ops = &sky2_ethtool_ops;
	dev->watchdog_timeo = TX_WATCHDOG;
	/* second port gets the ops variant without netpoll */
	dev->netdev_ops = &sky2_netdev_ops[port];

	sky2 = netdev_priv(dev);
	sky2->netdev = dev;
	sky2->hw = hw;
	sky2->msg_enable = netif_msg_init(debug, default_msg);

	u64_stats_init(&sky2->tx_stats.syncp);
	u64_stats_init(&sky2->rx_stats.syncp);

	/* Auto speed and flow control */
	sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
	if (hw->chip_id != CHIP_ID_YUKON_XL)
		dev->hw_features |= NETIF_F_RXCSUM;

	sky2->flow_mode = FC_BOTH;

	/* -1 means "not yet negotiated" */
	sky2->duplex = -1;
	sky2->speed = -1;
	sky2->advertising = sky2_supported_modes(hw);
	sky2->wol = wol;

	spin_lock_init(&sky2->phy_lock);

	sky2->tx_pending = TX_DEF_PENDING;
	sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
	sky2->rx_pending = RX_DEF_PENDING;

	hw->dev[port] = dev;

	sky2->port = port;

	dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	/* Enable receive hashing unless hardware is known broken */
	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
		dev->hw_features |= NETIF_F_RXHASH;

	if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
	}

	dev->features |= dev->hw_features;

	/* MTU range: 60 - 1500 or 9000 */
	dev->min_mtu = ETH_ZLEN;
	if (hw->chip_id == CHIP_ID_YUKON_FE ||
	    hw->chip_id == CHIP_ID_YUKON_FE_P)
		dev->max_mtu = ETH_DATA_LEN;
	else
		dev->max_mtu = ETH_JUMBO_MTU;

	/* try to get mac address in the following order:
	 * 1) from device tree data
	 * 2) from internal registers set by bootloader
	 */
	ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev);
	if (ret) {
		u8 addr[ETH_ALEN];

		memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	}

	/* if the address is invalid, use a random value */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		struct sockaddr sa = { AF_UNSPEC };

		dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n");
		eth_hw_addr_random(dev);
		/* push the random address into the hardware as well */
		memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
		if (sky2_set_mac_address(dev, &sa))
			dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n");
	}

	return dev;
}
4687
4688static void sky2_show_addr(struct net_device *dev)
4689{
4690	const struct sky2_port *sky2 = netdev_priv(dev);
4691
4692	netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
4693}
4694
4695/* Handle software interrupt used during MSI test */
4696static irqreturn_t sky2_test_intr(int irq, void *dev_id)
4697{
4698	struct sky2_hw *hw = dev_id;
4699	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
4700
4701	if (status == 0)
4702		return IRQ_NONE;
4703
4704	if (status & Y2_IS_IRQ_SW) {
4705		hw->flags |= SKY2_HW_USE_MSI;
4706		wake_up(&hw->msi_wait);
4707		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4708	}
4709	sky2_write32(hw, B0_Y2_SP_ICR, 2);
4710
4711	return IRQ_HANDLED;
4712}
4713
/* Test interrupt path by forcing a software IRQ
 *
 * Installs a temporary handler (sky2_test_intr), raises the chip's
 * software interrupt and waits up to 100ms for it to be delivered.
 * Returns 0 if MSI works, -EOPNOTSUPP if the interrupt never arrived
 * (caller falls back to INTx), or the request_irq() error.
 */
static int sky2_test_msi(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	int err;

	init_waitqueue_head(&hw->msi_wait);

	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
	if (err) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
		return err;
	}

	/* unmask only the software IRQ, then trigger it */
	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);

	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
	sky2_read8(hw, B0_CTST);	/* read back, presumably to flush the posted write */

	/* handler sets SKY2_HW_USE_MSI when the interrupt lands */
	wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);

	if (!(hw->flags & SKY2_HW_USE_MSI)) {
		/* MSI test failed, go back to INTx mode */
		dev_info(&pdev->dev, "No interrupt generated using MSI, "
			 "switching to INTx mode.\n");

		err = -EOPNOTSUPP;
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}

	/* mask everything again and release the temporary handler */
	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);

	free_irq(pdev->irq, hw);

	return err;
}
4751
4752/* This driver supports yukon2 chipset only */
4753static const char *sky2_name(u8 chipid, char *buf, int sz)
4754{
4755	static const char *const name[] = {
4756		"XL",		/* 0xb3 */
4757		"EC Ultra", 	/* 0xb4 */
4758		"Extreme",	/* 0xb5 */
4759		"EC",		/* 0xb6 */
4760		"FE",		/* 0xb7 */
4761		"FE+",		/* 0xb8 */
4762		"Supreme",	/* 0xb9 */
4763		"UL 2",		/* 0xba */
4764		"Unknown",	/* 0xbb */
4765		"Optima",	/* 0xbc */
4766		"OptimaEEE",    /* 0xbd */
4767		"Optima 2",	/* 0xbe */
4768	};
4769
4770	if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
4771		snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]);
4772	else
4773		snprintf(buf, sz, "(chip %#x)", chipid);
4774	return buf;
4775}
4776
/* Systems on which MSI is known not to work with this device; matched
 * via DMI in sky2_probe() to default disable_msi on.
 */
static const struct dmi_system_id msi_blacklist[] = {
	{
		.ident = "Dell Inspiron 1545",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
		},
	},
	{
		.ident = "Gateway P-79",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
			DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
		},
	},
	{
		.ident = "ASUS P5W DH Deluxe",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
		},
	},
	{
		.ident = "ASUS P6T",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "P6T"),
		},
	},
	{
		.ident = "ASUS P6X",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "P6X"),
		},
	},
	{}
};
4815
/* Probe one adapter: enable the PCI device, map the 16 KiB register
 * BAR, allocate the status ring shared by both ports, create and
 * register one netdev per port and set up MSI or INTx interrupts.
 * Failures unwind through the err_out_* label chain in reverse order
 * of acquisition.
 */
static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct sky2_hw *hw;
	int err, using_dac = 0, wol_default;
	u32 reg;
	char buf1[16];

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_out;
	}

	/* Get configuration information
	 * Note: only regular PCI config access once to test for HW issues
	 *       other PCI access through shared memory for speed and to
	 *	 avoid MMCONFIG problems.
	 */
	err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
	if (err) {
		dev_err(&pdev->dev, "PCI read config failed\n");
		goto err_out_disable;
	}

	/* all-ones means the device is not responding */
	if (~reg == 0) {
		dev_err(&pdev->dev, "PCI configuration read error\n");
		err = -EIO;
		goto err_out_disable;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	/* prefer 64-bit DMA where the platform supports it */
	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		using_dac = 1;
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (err < 0) {
			dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
				"for consistent allocations\n");
			goto err_out_free_regions;
		}
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_out_free_regions;
		}
	}


#ifdef __BIG_ENDIAN
	/* The sk98lin vendor driver uses hardware byte swapping but
	 * this driver uses software swapping.
	 */
	reg &= ~PCI_REV_DESC;
	err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	if (err) {
		dev_err(&pdev->dev, "PCI write config failed\n");
		goto err_out_free_regions;
	}
#endif

	wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;

	err = -ENOMEM;

	/* irq_name is a flexible member; reserve room for "sky2@pci:<name>" */
	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
	if (!hw)
		goto err_out_free_regions;

	hw->pdev = pdev;
	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));

	hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

	err = sky2_init(hw);
	if (err)
		goto err_out_iounmap;

	/* ring for status responses */
	hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
	hw->st_le = dma_alloc_coherent(&pdev->dev,
				       hw->st_size * sizeof(struct sky2_status_le),
				       &hw->st_dma, GFP_KERNEL);
	if (!hw->st_le) {
		err = -ENOMEM;
		goto err_out_reset;
	}

	dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
		 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);

	sky2_reset(hw);

	dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_pci;
	}

	/* honour the DMI blacklist unless the user forced a choice */
	if (disable_msi == -1)
		disable_msi = !!dmi_check_system(msi_blacklist);

	if (!disable_msi && pci_enable_msi(pdev) == 0) {
		err = sky2_test_msi(hw);
		if (err) {
			pci_disable_msi(pdev);
			/* -EOPNOTSUPP just means "fall back to INTx" */
			if (err != -EOPNOTSUPP)
				goto err_out_free_netdev;
		}
	}

	netif_napi_add(dev, &hw->napi, sky2_poll);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	netif_carrier_off(dev);

	sky2_show_addr(dev);

	if (hw->ports > 1) {
		dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
		if (!dev1) {
			err = -ENOMEM;
			goto err_out_unregister;
		}

		err = register_netdev(dev1);
		if (err) {
			dev_err(&pdev->dev, "cannot register second net device\n");
			goto err_out_free_dev1;
		}

		err = sky2_setup_irq(hw, hw->irq_name);
		if (err)
			goto err_out_unregister_dev1;

		sky2_show_addr(dev1);
	}

	timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
	INIT_WORK(&hw->restart_work, sky2_restart);

	pci_set_drvdata(pdev, hw);
	/* chip needs extra settle time coming out of D3hot */
	pdev->d3hot_delay = 300;

	return 0;

err_out_unregister_dev1:
	unregister_netdev(dev1);
err_out_free_dev1:
	free_netdev(dev1);
err_out_unregister:
	unregister_netdev(dev);
err_out_free_netdev:
	if (hw->flags & SKY2_HW_USE_MSI)
		pci_disable_msi(pdev);
	free_netdev(dev);
err_out_free_pci:
	dma_free_coherent(&pdev->dev,
			  hw->st_size * sizeof(struct sky2_status_le),
			  hw->st_le, hw->st_dma);
err_out_reset:
	sky2_write8(hw, B0_CTST, CS_RST_SET);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out:
	return err;
}
5007
/* Device removal: unwind everything sky2_probe() set up, in reverse */
static void sky2_remove(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i;

	if (!hw)
		return;

	timer_shutdown_sync(&hw->watchdog_timer);
	cancel_work_sync(&hw->restart_work);

	for (i = hw->ports-1; i >= 0; --i)
		unregister_netdev(hw->dev[i]);

	/* mask interrupts; read back to make sure the write completed */
	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);

	sky2_power_aux(hw);

	/* put the chip into reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_read8(hw, B0_CTST);

	/* the IRQ is only requested for dual-port cards (see probe) */
	if (hw->ports > 1) {
		napi_disable(&hw->napi);
		free_irq(pdev->irq, hw);
	}

	if (hw->flags & SKY2_HW_USE_MSI)
		pci_disable_msi(pdev);
	dma_free_coherent(&pdev->dev,
			  hw->st_size * sizeof(struct sky2_status_le),
			  hw->st_le, hw->st_dma);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	for (i = hw->ports-1; i >= 0; --i)
		free_netdev(hw->dev[i]);

	iounmap(hw->regs);
	kfree(hw);
}
5049
/* System suspend: stop the watchdog and restart work, bring all ports
 * down and arm wake-on-LAN where configured. Built unconditionally
 * (not under CONFIG_PM_SLEEP) because sky2_shutdown() calls it too.
 */
static int sky2_suspend(struct device *dev)
{
	struct sky2_hw *hw = dev_get_drvdata(dev);
	int i;

	if (!hw)
		return 0;

	del_timer_sync(&hw->watchdog_timer);
	cancel_work_sync(&hw->restart_work);

	rtnl_lock();

	sky2_all_down(hw);
	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		/* program wake-on-LAN for ports that have it enabled */
		if (sky2->wol)
			sky2_wol_init(sky2);
	}

	sky2_power_aux(hw);
	rtnl_unlock();

	return 0;
}
5077
5078#ifdef CONFIG_PM_SLEEP
/* System resume: re-enable the chip clocks, reset the hardware and
 * bring all previously running ports back up.
 */
static int sky2_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int err;

	if (!hw)
		return 0;

	/* Re-enable all clocks */
	err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
	if (err) {
		dev_err(&pdev->dev, "PCI write config failed\n");
		goto out;
	}

	rtnl_lock();
	sky2_reset(hw);
	sky2_all_up(hw);
	rtnl_unlock();

	return 0;
out:

	dev_err(&pdev->dev, "resume failed (%d)\n", err);
	pci_disable_device(pdev);
	return err;
}
5107
5108static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
5109#define SKY2_PM_OPS (&sky2_pm_ops)
5110
5111#else
5112
5113#define SKY2_PM_OPS NULL
5114#endif
5115
/* Shutdown/reboot hook: close every running port, reuse the suspend
 * path to quiesce the chip and arm wake-on-LAN, then drop to D3hot.
 */
static void sky2_shutdown(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int port;

	for (port = 0; port < hw->ports; port++) {
		struct net_device *ndev = hw->dev[port];

		rtnl_lock();
		if (netif_running(ndev)) {
			dev_close(ndev);
			netif_device_detach(ndev);
		}
		rtnl_unlock();
	}
	sky2_suspend(&pdev->dev);
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	pci_set_power_state(pdev, PCI_D3hot);
}
5135
/* PCI driver glue; .driver.pm is NULL unless CONFIG_PM_SLEEP is set
 * (see the SKY2_PM_OPS definitions above).
 */
static struct pci_driver sky2_driver = {
	.name = DRV_NAME,
	.id_table = sky2_id_table,
	.probe = sky2_probe,
	.remove = sky2_remove,
	.shutdown = sky2_shutdown,
	.driver.pm = SKY2_PM_OPS,
};
5144
5145static int __init sky2_init_module(void)
5146{
5147	pr_info("driver version " DRV_VERSION "\n");
5148
5149	sky2_debug_init();
5150	return pci_register_driver(&sky2_driver);
5151}
5152
/* Module unload: unregister the PCI driver first so no device is
 * active when the debugfs notifier/directory are torn down.
 */
static void __exit sky2_cleanup_module(void)
{
	pci_unregister_driver(&sky2_driver);
	sky2_debug_cleanup();
}
5158
5159module_init(sky2_init_module);
5160module_exit(sky2_cleanup_module);
5161
5162MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
5163MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
5164MODULE_LICENSE("GPL");
5165MODULE_VERSION(DRV_VERSION);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * New driver for Marvell Yukon 2 chipset.
   4 * Based on earlier sk98lin, and skge driver.
   5 *
   6 * This driver intentionally does not support all the features
   7 * of the original driver such as link fail-over and link management because
   8 * those should be done at higher levels.
   9 *
  10 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/crc32.h>
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/netdevice.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/etherdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/pci.h>
  23#include <linux/interrupt.h>
  24#include <linux/ip.h>
  25#include <linux/slab.h>
  26#include <net/ip.h>
  27#include <linux/tcp.h>
  28#include <linux/in.h>
  29#include <linux/delay.h>
  30#include <linux/workqueue.h>
  31#include <linux/if_vlan.h>
  32#include <linux/prefetch.h>
  33#include <linux/debugfs.h>
  34#include <linux/mii.h>
  35#include <linux/of_device.h>
  36#include <linux/of_net.h>
  37#include <linux/dmi.h>
 
  38
  39#include <asm/irq.h>
  40
  41#include "sky2.h"
  42
/* Driver identity, ring geometry constants and load-time tunables. */
  43#define DRV_NAME		"sky2"
  44#define DRV_VERSION		"1.30"
  45
  46/*
  47 * The Yukon II chipset takes 64 bit command blocks (called list elements)
  48 * that are organized into three (receive, transmit, status) different rings
  49 * similar to Tigon3.
  50 */
  51
  52#define RX_LE_SIZE	    	1024
  53#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
  54#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
  55#define RX_DEF_PENDING		RX_MAX_PENDING
  56
  57/* This is the worst case number of transmit list elements for a single skb:
  58 * VLAN:GSO + CKSUM + Data + skb_frags * DMA
  59 */
  60#define MAX_SKB_TX_LE	(2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
  61#define TX_MIN_PENDING		(MAX_SKB_TX_LE+1)
  62#define TX_MAX_PENDING		1024
  63#define TX_DEF_PENDING		63
  64
  65#define TX_WATCHDOG		(5 * HZ)
  66#define PHY_RETRIES		1000
  67
  68#define SKY2_EEPROM_MAGIC	0x9955aabb
  69
/* Advance a ring index; ring size s must be a power of two. */
  70#define RING_NEXT(x, s)	(((x)+1) & ((s)-1))
  71
/* Default netif_msg mask used when the "debug" parameter is left at -1. */
  72static const u32 default_msg =
  73    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
  74    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
  75    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
  76
  77static int debug = -1;		/* defaults above */
  78module_param(debug, int, 0);
  79MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  80
  81static int copybreak __read_mostly = 128;
  82module_param(copybreak, int, 0);
  83MODULE_PARM_DESC(copybreak, "Receive copy threshold");
  84
  85static int disable_msi = -1;
  86module_param(disable_msi, int, 0);
  87MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  88
/* When set, sky2_wol_init() enables PCI_Y2_PME_LEGACY for broken BIOSes. */
  89static int legacy_pme = 0;
  90module_param(legacy_pme, int, 0);
  91MODULE_PARM_DESC(legacy_pme, "Legacy power management");
  92
/* PCI IDs of all supported SysKonnect, D-Link and Marvell Yukon-2 boards. */
  93static const struct pci_device_id sky2_id_table[] = {
  94	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
  95	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
  96	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
  97	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
  98	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
  99	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
 100	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },	/* DGE-550T */
 101	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
 102	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
 103	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
 104	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
 105	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
 106	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
 107	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
 108	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
 109	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
 110	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
 111	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
 112	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
 113	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
 114	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
 115	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
 116	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
 117	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
 118	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
 119	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
 120	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
 121	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
 122	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
 123	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
 124	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
 125	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
 126	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
 127	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
 128	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
 129	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
 130	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
 131	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
 132	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
  
 133	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
 134	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
 135	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
 136	{ 0 }
 137};
 138
 139MODULE_DEVICE_TABLE(pci, sky2_id_table);
 140
 141/* Avoid conditionals by using array */
/* Per-port queue base addresses and IRQ masks, indexed by port (0 or 1). */
 142static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
 143static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
 144static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
 145
/* Forward declarations for handlers defined later in the file. */
 146static void sky2_set_multicast(struct net_device *dev);
 147static irqreturn_t sky2_intr(int irq, void *dev_id);
 148
 149/* Access to PHY via serial interconnect */
/* Write @val to PHY register @reg through the GMAC serial (SMI) interface.
 * Busy-waits up to PHY_RETRIES * 10us for the controller to go idle.
 * Returns 0 on success, -ETIMEDOUT on timeout, or -EIO if the control
 * register reads back all-ones (hardware absent, e.g. surprise removal).
 */
 150static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
 151{
 152	int i;
 153
 154	gma_write16(hw, port, GM_SMI_DATA, val);
 155	gma_write16(hw, port, GM_SMI_CTRL,
 156		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
 157
 158	for (i = 0; i < PHY_RETRIES; i++) {
 159		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
		/* all-ones readback means the device is gone */
 160		if (ctrl == 0xffff)
 161			goto io_error;
 162
 163		if (!(ctrl & GM_SMI_CT_BUSY))
 164			return 0;
 165
 166		udelay(10);
 167	}
 168
 169	dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
 170	return -ETIMEDOUT;
 171
 172io_error:
 173	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
 174	return -EIO;
 175}
 176
/* Read PHY register @reg through the GMAC serial (SMI) interface into @val.
 * Busy-waits up to PHY_RETRIES * 10us for valid read data.
 * Returns 0 on success, -ETIMEDOUT on timeout, or -EIO if the control
 * register reads back all-ones (hardware absent).
 */
 177static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
 178{
 179	int i;
 180
 181	gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
 182		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
 183
 184	for (i = 0; i < PHY_RETRIES; i++) {
 185		u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
		/* all-ones readback means the device is gone */
 186		if (ctrl == 0xffff)
 187			goto io_error;
 188
 189		if (ctrl & GM_SMI_CT_RD_VAL) {
 190			*val = gma_read16(hw, port, GM_SMI_DATA);
 191			return 0;
 192		}
 193
 194		udelay(10);
 195	}
 196
 197	dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
 198	return -ETIMEDOUT;
 199io_error:
 200	dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
 201	return -EIO;
 202}
 203
 204static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 205{
 206	u16 v = 0;
 207	__gm_phy_read(hw, port, reg, &v);
 208	return v;
 209}
 210
 211
/* Bring the chip to full power (VCC): undo clock gating, and on chips with
 * advanced power control clear the ASPM/VMAIN PCI config bits and apply the
 * GPIO status-race workaround.  Ends by lighting the "driver loaded" LED.
 */
 212static void sky2_power_on(struct sky2_hw *hw)
 213{
 214	/* switch power to VCC (WA for VAUX problem) */
 215	sky2_write8(hw, B0_POWER_CTRL,
 216		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
 217
 218	/* disable Core Clock Division, */
 219	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
 220
 221	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
 222		/* enable bits are inverted */
 223		sky2_write8(hw, B2_Y2_CLK_GATE,
 224			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 225			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 226			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
 227	else
 228		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 229
 230	if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
 231		u32 reg;
 232
 233		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 234
 235		reg = sky2_pci_read32(hw, PCI_DEV_REG4);
 236		/* set all bits to 0 except bits 15..12 and 8 */
 237		reg &= P_ASPM_CONTROL_MSK;
 238		sky2_pci_write32(hw, PCI_DEV_REG4, reg);
 239
 240		reg = sky2_pci_read32(hw, PCI_DEV_REG5);
 241		/* set all bits to 0 except bits 28 & 27 */
 242		reg &= P_CTL_TIM_VMAIN_AV_MSK;
 243		sky2_pci_write32(hw, PCI_DEV_REG5, reg);
 244
 245		sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
 246
 247		sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
 248
 249		/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
 250		reg = sky2_read32(hw, B2_GP_IO);
 251		reg |= GLB_GPIO_STAT_RACE_DIS;
 252		sky2_write32(hw, B2_GP_IO, reg);
 253
		/* read back to flush the posted write */
 254		sky2_read32(hw, B2_GP_IO);
 255	}
 256
 257	/* Turn on "driver loaded" LED */
 258	sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON);
 259}
 260
/* Switch the chip to auxiliary (low) power: re-enable clock gating and,
 * when VAUX is available and PME from D3cold is supported, move power
 * from VCC to VAUX.  Also turns the "driver loaded" LED off.
 * Counterpart of sky2_power_on().
 */
 261static void sky2_power_aux(struct sky2_hw *hw)
 262{
 263	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
 264		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 265	else
 266		/* enable bits are inverted */
 267		sky2_write8(hw, B2_Y2_CLK_GATE,
 268			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 269			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 270			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
 271
 272	/* switch power to VAUX if supported and PME from D3cold */
 273	if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
 274	     pci_pme_capable(hw->pdev, PCI_D3cold))
 275		sky2_write8(hw, B0_POWER_CTRL,
 276			    (PC_VAUX_ENA | PC_VCC_ENA |
 277			     PC_VAUX_ON | PC_VCC_OFF));
 278
 279	/* turn off "driver loaded LED" */
 280	sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF);
 281}
 282
/* Quiesce one GMAC: mask its interrupts, clear the multicast hash filter,
 * and re-enable unicast/multicast address filtering.
 */
 283static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
 284{
 285	u16 reg;
 286
 287	/* disable all GMAC IRQ's */
 288	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
 289
 290	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
 291	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
 292	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
 293	gma_write16(hw, port, GM_MC_ADDR_H4, 0);
 294
 295	reg = gma_read16(hw, port, GM_RX_CTRL);
 296	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
 297	gma_write16(hw, port, GM_RX_CTRL, reg);
 298}
 299
/* Lookup tables indexed by enum flow_control (FC_NONE..FC_BOTH); used in
 * sky2_phy_init() via sky2->flow_mode to build the autoneg advertisement
 * and the GMA flow-control disable bits.
 */
 300/* flow control to advertise bits */
 301static const u16 copper_fc_adv[] = {
 302	[FC_NONE]	= 0,
 303	[FC_TX]		= PHY_M_AN_ASP,
 304	[FC_RX]		= PHY_M_AN_PC,
 305	[FC_BOTH]	= PHY_M_AN_PC | PHY_M_AN_ASP,
 306};
 307
 308/* flow control to advertise bits when using 1000BaseX */
 309static const u16 fiber_fc_adv[] = {
 310	[FC_NONE] = PHY_M_P_NO_PAUSE_X,
 311	[FC_TX]   = PHY_M_P_ASYM_MD_X,
 312	[FC_RX]	  = PHY_M_P_SYM_MD_X,
 313	[FC_BOTH] = PHY_M_P_BOTH_MD_X,
 314};
 315
 316/* flow control to GMA disable bits */
 317static const u16 gm_fc_disable[] = {
 318	[FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
 319	[FC_TX]	  = GM_GPCR_FC_RX_DIS,
 320	[FC_RX]	  = GM_GPCR_FC_TX_DIS,
 321	[FC_BOTH] = 0,
 322};
 323
 324
/* Program the Marvell PHY for one port: downshift control, crossover/energy
 * detect, speed/duplex (autoneg or forced from sky2->speed/duplex), pause
 * advertisement, LED behavior, and a series of chip-revision-specific AFE
 * workarounds.  Finally selects which PHY interrupts are unmasked.
 * Caller must hold sky2->phy_lock (see sky2_phy_reinit()/sky2_mac_init()).
 */
 325static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 326{
 327	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
 328	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
 329
	/* older PHYs: downshift is configured in the extended control reg */
 330	if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
 331	    !(hw->flags & SKY2_HW_NEWER_PHY)) {
 332		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 333
 334		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
 335			   PHY_M_EC_MAC_S_MSK);
 336		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
 337
 338		/* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
 339		if (hw->chip_id == CHIP_ID_YUKON_EC)
 340			/* set downshift counter to 3x and enable downshift */
 341			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
 342		else
 343			/* set master & slave downshift counter to 1x */
 344			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
 345
 346		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
 347	}
 348
 349	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 350	if (sky2_is_copper(hw)) {
 351		if (!(hw->flags & SKY2_HW_GIGABIT)) {
 352			/* enable automatic crossover */
 353			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
 354
 355			if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
 356			    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
 357				u16 spec;
 358
 359				/* Enable Class A driver for FE+ A0 */
 360				spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
 361				spec |= PHY_M_FESC_SEL_CL_A;
 362				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
 363			}
 364		} else {
 365			/* disable energy detect */
 366			ctrl &= ~PHY_M_PC_EN_DET_MSK;
 367
 368			/* enable automatic crossover */
 369			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
 370
 371			/* downshift on PHY 88E1112 and 88E1149 is changed */
 372			if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
 373			     (hw->flags & SKY2_HW_NEWER_PHY)) {
 374				/* set downshift counter to 3x and enable downshift */
 375				ctrl &= ~PHY_M_PC_DSC_MSK;
 376				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
 377			}
 378		}
 379	} else {
 380		/* workaround for deviation #4.88 (CRC errors) */
 381		/* disable Automatic Crossover */
 382
 383		ctrl &= ~PHY_M_PC_MDIX_MSK;
 384	}
 385
 386	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 387
 388	/* special setup for PHY 88E1112 Fiber */
 389	if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
 390		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 391
 392		/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
 393		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
 394		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 395		ctrl &= ~PHY_M_MAC_MD_MSK;
 396		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
 397		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 398
 399		if (hw->pmd_type  == 'P') {
 400			/* select page 1 to access Fiber registers */
 401			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
 402
 403			/* for SFP-module set SIGDET polarity to low */
 404			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 405			ctrl |= PHY_M_FIB_SIGD_POL;
 406			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 407		}
 408
 409		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
 410	}
 411
	/* build PHY control, 1000T control, advertisement and GMA control */
 412	ctrl = PHY_CT_RESET;
 413	ct1000 = 0;
 414	adv = PHY_AN_CSMA;
 415	reg = 0;
 416
 417	if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
 418		if (sky2_is_copper(hw)) {
 419			if (sky2->advertising & ADVERTISED_1000baseT_Full)
 420				ct1000 |= PHY_M_1000C_AFD;
 421			if (sky2->advertising & ADVERTISED_1000baseT_Half)
 422				ct1000 |= PHY_M_1000C_AHD;
 423			if (sky2->advertising & ADVERTISED_100baseT_Full)
 424				adv |= PHY_M_AN_100_FD;
 425			if (sky2->advertising & ADVERTISED_100baseT_Half)
 426				adv |= PHY_M_AN_100_HD;
 427			if (sky2->advertising & ADVERTISED_10baseT_Full)
 428				adv |= PHY_M_AN_10_FD;
 429			if (sky2->advertising & ADVERTISED_10baseT_Half)
 430				adv |= PHY_M_AN_10_HD;
 431
 432		} else {	/* special defines for FIBER (88E1040S only) */
 433			if (sky2->advertising & ADVERTISED_1000baseT_Full)
 434				adv |= PHY_M_AN_1000X_AFD;
 435			if (sky2->advertising & ADVERTISED_1000baseT_Half)
 436				adv |= PHY_M_AN_1000X_AHD;
 437		}
 438
 439		/* Restart Auto-negotiation */
 440		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
 441	} else {
 442		/* forced speed/duplex settings */
 443		ct1000 = PHY_M_1000C_MSE;
 444
 445		/* Disable auto update for duplex flow control and duplex */
 446		reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;
 447
 448		switch (sky2->speed) {
 449		case SPEED_1000:
 450			ctrl |= PHY_CT_SP1000;
 451			reg |= GM_GPCR_SPEED_1000;
 452			break;
 453		case SPEED_100:
 454			ctrl |= PHY_CT_SP100;
 455			reg |= GM_GPCR_SPEED_100;
 456			break;
 457		}
 458
 459		if (sky2->duplex == DUPLEX_FULL) {
 460			reg |= GM_GPCR_DUP_FULL;
 461			ctrl |= PHY_CT_DUP_MD;
 462		} else if (sky2->speed < SPEED_1000)
			/* half duplex below gigabit: no pause frames */
 463			sky2->flow_mode = FC_NONE;
 464	}
 465
 466	if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
 467		if (sky2_is_copper(hw))
 468			adv |= copper_fc_adv[sky2->flow_mode];
 469		else
 470			adv |= fiber_fc_adv[sky2->flow_mode];
 471	} else {
 472		reg |= GM_GPCR_AU_FCT_DIS;
 473		reg |= gm_fc_disable[sky2->flow_mode];
 474
 475		/* Forward pause packets to GMAC? */
 476		if (sky2->flow_mode & FC_RX)
 477			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
 478		else
 479			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
 480	}
 481
 482	gma_write16(hw, port, GM_GP_CTRL, reg);
 483
 484	if (hw->flags & SKY2_HW_GIGABIT)
 485		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
 486
 487	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
 488	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
 489
 490	/* Setup Phy LED's */
 491	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
 492	ledover = 0;
 493
 494	switch (hw->chip_id) {
 495	case CHIP_ID_YUKON_FE:
 496		/* on 88E3082 these bits are at 11..9 (shifted left) */
 497		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
 498
 499		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
 500
 501		/* delete ACT LED control bits */
 502		ctrl &= ~PHY_M_FELP_LED1_MSK;
 503		/* change ACT LED control to blink mode */
 504		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
 505		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
 506		break;
 507
 508	case CHIP_ID_YUKON_FE_P:
 509		/* Enable Link Partner Next Page */
 510		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 511		ctrl |= PHY_M_PC_ENA_LIP_NP;
 512
 513		/* disable Energy Detect and enable scrambler */
 514		ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
 515		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 516
 517		/* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
 518		ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
 519			PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
 520			PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
 521
 522		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
 523		break;
 524
 525	case CHIP_ID_YUKON_XL:
 526		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 527
 528		/* select page 3 to access LED control register */
 529		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
 530
 531		/* set LED Function Control register */
 532		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
 533			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
 534			      PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
 535			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
 536			      PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */
 537
 538		/* set Polarity Control register */
 539		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
 540			     (PHY_M_POLC_LS1_P_MIX(4) |
 541			      PHY_M_POLC_IS0_P_MIX(4) |
 542			      PHY_M_POLC_LOS_CTRL(2) |
 543			      PHY_M_POLC_INIT_CTRL(2) |
 544			      PHY_M_POLC_STA1_CTRL(2) |
 545			      PHY_M_POLC_STA0_CTRL(2)));
 546
 547		/* restore page register */
 548		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
 549		break;
 550
 551	case CHIP_ID_YUKON_EC_U:
 552	case CHIP_ID_YUKON_EX:
 553	case CHIP_ID_YUKON_SUPR:
 554		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 555
 556		/* select page 3 to access LED control register */
 557		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
 558
 559		/* set LED Function Control register */
 560		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
 561			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
 562			      PHY_M_LEDC_INIT_CTRL(8) |	/* 10 Mbps */
 563			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
 564			      PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
 565
 566		/* set Blink Rate in LED Timer Control Register */
 567		gm_phy_write(hw, port, PHY_MARV_INT_MASK,
 568			     ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
 569		/* restore page register */
 570		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
 571		break;
 572
 573	default:
 574		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
 575		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
 576
 577		/* turn off the Rx LED (LED_RX) */
 578		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
 579	}
 580
	/* chip-revision-specific analog front end (AFE) workarounds below */
 581	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
 582		/* apply fixes in PHY AFE */
 583		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
 584
 585		/* increase differential signal amplitude in 10BASE-T */
 586		gm_phy_write(hw, port, 0x18, 0xaa99);
 587		gm_phy_write(hw, port, 0x17, 0x2011);
 588
 589		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
 590			/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
 591			gm_phy_write(hw, port, 0x18, 0xa204);
 592			gm_phy_write(hw, port, 0x17, 0x2002);
 593		}
 594
 595		/* set page register to 0 */
 596		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 597	} else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
 598		   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
 599		/* apply workaround for integrated resistors calibration */
 600		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
 601		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
 602	} else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
 603		/* apply fixes in PHY AFE */
 604		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
 605
 606		/* apply RDAC termination workaround */
 607		gm_phy_write(hw, port, 24, 0x2800);
 608		gm_phy_write(hw, port, 23, 0x2001);
 609
 610		/* set page register back to 0 */
 611		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 612	} else if (hw->chip_id != CHIP_ID_YUKON_EX &&
 613		   hw->chip_id < CHIP_ID_YUKON_SUPR) {
 614		/* no effect on Yukon-XL */
 615		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
 616
 617		if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
 618		    sky2->speed == SPEED_100) {
 619			/* turn on 100 Mbps LED (LED_LINK100) */
 620			ledover |= PHY_M_LED_MO_100(MO_LED_ON);
 621		}
 622
 623		if (ledover)
 624			gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
 625
 626	} else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
 627		   (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
 628		int i;
 629		/* This a phy register setup workaround copied from vendor driver. */
 630		static const struct {
 631			u16 reg, val;
 632		} eee_afe[] = {
 633			{ 0x156, 0x58ce },
 634			{ 0x153, 0x99eb },
 635			{ 0x141, 0x8064 },
 636			/* { 0x155, 0x130b },*/
 637			{ 0x000, 0x0000 },
 638			{ 0x151, 0x8433 },
 639			{ 0x14b, 0x8c44 },
 640			{ 0x14c, 0x0f90 },
 641			{ 0x14f, 0x39aa },
 642			/* { 0x154, 0x2f39 },*/
 643			{ 0x14d, 0xba33 },
 644			{ 0x144, 0x0048 },
 645			{ 0x152, 0x2010 },
 646			/* { 0x158, 0x1223 },*/
 647			{ 0x140, 0x4444 },
 648			{ 0x154, 0x2f3b },
 649			{ 0x158, 0xb203 },
 650			{ 0x157, 0x2029 },
 651		};
 652
 653		/* Start Workaround for OptimaEEE Rev.Z0 */
 654		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);
 655
 656		gm_phy_write(hw, port,  1, 0x4099);
 657		gm_phy_write(hw, port,  3, 0x1120);
 658		gm_phy_write(hw, port, 11, 0x113c);
 659		gm_phy_write(hw, port, 14, 0x8100);
 660		gm_phy_write(hw, port, 15, 0x112a);
 661		gm_phy_write(hw, port, 17, 0x1008);
 662
 663		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
 664		gm_phy_write(hw, port,  1, 0x20b0);
 665
 666		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
 667
 668		for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
 669			/* apply AFE settings */
 670			gm_phy_write(hw, port, 17, eee_afe[i].val);
 671			gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
 672		}
 673
 674		/* End Workaround for OptimaEEE */
 675		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 676
 677		/* Enable 10Base-Te (EEE) */
 678		if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
 679			reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 680			gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
 681				     reg | PHY_M_10B_TE_ENABLE);
 682		}
 683	}
 684
 685	/* Enable phy interrupt on auto-negotiation complete (or link up) */
 686	if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
 687		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
 688	else
 689		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
 690}
 691
/* Per-port PHY power-down and COMA-mode bits of PCI_DEV_REG1, indexed by port. */
 692static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
 693static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
 694
/* Power the PHY of one port back up by clearing its power-down bit in
 * PCI_DEV_REG1 (test-config writes must be unlocked around it), then
 * restart autoneg (Yukon-FE) or release the GPHY reset (adv. power ctl).
 */
 695static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
 696{
 697	u32 reg1;
 698
 699	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 700	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 701	reg1 &= ~phy_power[port];
 702
 703	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
 704		reg1 |= coma_mode[port];
 705
 706	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
 707	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	/* read back to flush the posted config write */
 708	sky2_pci_read32(hw, PCI_DEV_REG1);
 709
 710	if (hw->chip_id == CHIP_ID_YUKON_FE)
 711		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
 712	else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
 713		sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 714}
 715
/* Power one port's PHY down: take it out of reset, allow GMII power-down
 * (newer PHYs), force the GMAC into a quiescent state, enter IEEE
 * power-down mode where supported, then set the PHY power-down bit in
 * PCI_DEV_REG1.  Counterpart of sky2_phy_power_up().
 */
 716static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
 717{
 718	u32 reg1;
 719	u16 ctrl;
 720
 721	/* release GPHY Control reset */
 722	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 723
 724	/* release GMAC reset */
 725	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
 726
 727	if (hw->flags & SKY2_HW_NEWER_PHY) {
 728		/* select page 2 to access MAC control register */
 729		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
 730
 731		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 732		/* allow GMII Power Down */
 733		ctrl &= ~PHY_M_MAC_GMIF_PUP;
 734		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 735
 736		/* set page register back to 0 */
 737		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 738	}
 739
 740	/* setup General Purpose Control Register */
 741	gma_write16(hw, port, GM_GP_CTRL,
 742		    GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
 743		    GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
 744		    GM_GPCR_AU_SPD_DIS);
 745
 746	if (hw->chip_id != CHIP_ID_YUKON_EC) {
 747		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
 748			/* select page 2 to access MAC control register */
 749			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
 750
 751			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 752			/* enable Power Down */
 753			ctrl |= PHY_M_PC_POW_D_ENA;
 754			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 755
 756			/* set page register back to 0 */
 757			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 758		}
 759
 760		/* set IEEE compatible Power Down Mode (dev. #4.99) */
 761		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
 762	}
 763
 764	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 765	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 766	reg1 |= phy_power[port];		/* set PHY to PowerDown/COMA Mode */
 767	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
 768	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 769}
 770
 771/* configure IPG according to used link speed */
/* Picks the gigabit IPG value for speeds above 100 Mbps, otherwise the
 * 10/100 value, in the GMAC serial mode register.
 */
 772static void sky2_set_ipg(struct sky2_port *sky2)
 773{
 774	u16 reg;
 775
 776	reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE);
 777	reg &= ~GM_SMOD_IPG_MSK;
 778	if (sky2->speed > SPEED_100)
 779		reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
 780	else
 781		reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
 782	gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg);
 783}
 784
 785/* Enable Rx/Tx */
 786static void sky2_enable_rx_tx(struct sky2_port *sky2)
 787{
 788	struct sky2_hw *hw = sky2->hw;
 789	unsigned port = sky2->port;
 790	u16 reg;
 791
 792	reg = gma_read16(hw, port, GM_GP_CTRL);
 793	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
 794	gma_write16(hw, port, GM_GP_CTRL, reg);
 795}
 796
 797/* Force a renegotiation */
/* Re-runs full PHY setup and re-enables Rx/Tx under phy_lock. */
 798static void sky2_phy_reinit(struct sky2_port *sky2)
 799{
 800	spin_lock_bh(&sky2->phy_lock);
 801	sky2_phy_init(sky2->hw, sky2->port);
 802	sky2_enable_rx_tx(sky2);
 803	spin_unlock_bh(&sky2->phy_lock);
 804}
 805
 806/* Put device in state to listen for Wake On Lan */
/* Temporarily drops gigabit advertisement and flow control while the PHY is
 * re-initialized for low-power link, programs the WOL MAC address and the
 * link-change/magic-packet wake units per sky2->wol, then blocks the
 * receiver FIFO.  Runs on suspend/shutdown paths.
 */
 807static void sky2_wol_init(struct sky2_port *sky2)
 808{
 809	struct sky2_hw *hw = sky2->hw;
 810	unsigned port = sky2->port;
 811	enum flow_control save_mode;
 812	u16 ctrl;
 813
 814	/* Bring hardware out of reset */
 815	sky2_write16(hw, B0_CTST, CS_RST_CLR);
 816	sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
 817
 818	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 819	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
 820
 821	/* Force to 10/100
 822	 * sky2_reset will re-enable on resume
 823	 */
 824	save_mode = sky2->flow_mode;
 825	ctrl = sky2->advertising;
 826
 827	sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
 828	sky2->flow_mode = FC_NONE;
 829
 830	spin_lock_bh(&sky2->phy_lock);
 831	sky2_phy_power_up(hw, port);
 832	sky2_phy_init(hw, port);
 833	spin_unlock_bh(&sky2->phy_lock);
 834
	/* restore the settings modified above */
 835	sky2->flow_mode = save_mode;
 836	sky2->advertising = ctrl;
 837
 838	/* Set GMAC to no flow control and auto update for speed/duplex */
 839	gma_write16(hw, port, GM_GP_CTRL,
 840		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
 841		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
 842
 843	/* Set WOL address */
 844	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
 845		    sky2->netdev->dev_addr, ETH_ALEN);
 846
 847	/* Turn on appropriate WOL control bits */
 848	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
 849	ctrl = 0;
 850	if (sky2->wol & WAKE_PHY)
 851		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
 852	else
 853		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
 854
 855	if (sky2->wol & WAKE_MAGIC)
 856		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
 857	else
 858		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
 859
 860	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
 861	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
 862
 863	/* Disable PiG firmware */
 864	sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
 865
 866	/* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */
 867	if (legacy_pme) {
 868		u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 869		reg1 |= PCI_Y2_PME_LEGACY;
 870		sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
 871	}
 872
 873	/* block receiver */
 874	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
	/* read back to flush the posted writes */
 875	sky2_read32(hw, B0_CTST);
 876}
 877
/* Configure Tx FIFO store-and-forward: always on for Yukon-Extreme B0 and
 * later chips; for older chips running jumbo frames it is disabled and the
 * almost-empty threshold is raised instead.
 */
 878static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
 879{
 880	struct net_device *dev = hw->dev[port];
 881
 882	if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
 883	      hw->chip_rev != CHIP_REV_YU_EX_A0) ||
 884	     hw->chip_id >= CHIP_ID_YUKON_FE_P) {
 885		/* Yukon-Extreme B0 and further Extreme devices */
 886		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
 887	} else if (dev->mtu > ETH_DATA_LEN) {
 888		/* set Tx GMAC FIFO Almost Empty Threshold */
 889		sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
 890			     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
 891
 892		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
 893	} else
 894		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
 895}
 896
 897static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 898{
 899	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
 900	u16 reg;
 901	u32 rx_reg;
 902	int i;
 903	const u8 *addr = hw->dev[port]->dev_addr;
 904
 905	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
 906	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 907
 908	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
 909
 910	if (hw->chip_id == CHIP_ID_YUKON_XL &&
 911	    hw->chip_rev == CHIP_REV_YU_XL_A0 &&
 912	    port == 1) {
 913		/* WA DEV_472 -- looks like crossed wires on port 2 */
 914		/* clear GMAC 1 Control reset */
 915		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
 916		do {
 917			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
 918			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
 919		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
 920			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
 921			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
 922	}
 923
 924	sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
 925
 926	/* Enable Transmit FIFO Underrun */
 927	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
 928
 929	spin_lock_bh(&sky2->phy_lock);
 930	sky2_phy_power_up(hw, port);
 931	sky2_phy_init(hw, port);
 932	spin_unlock_bh(&sky2->phy_lock);
 933
 934	/* MIB clear */
 935	reg = gma_read16(hw, port, GM_PHY_ADDR);
 936	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
 937
 938	for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
 939		gma_read16(hw, port, i);
 940	gma_write16(hw, port, GM_PHY_ADDR, reg);
 941
 942	/* transmit control */
 943	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 944
 945	/* receive control reg: unicast + multicast + no FCS  */
 946	gma_write16(hw, port, GM_RX_CTRL,
 947		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
 948
 949	/* transmit flow control */
 950	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
 951
 952	/* transmit parameter */
 953	gma_write16(hw, port, GM_TX_PARAM,
 954		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
 955		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 956		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
 957		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 958
 959	/* serial mode register */
 960	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 961		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);
 962
 963	if (hw->dev[port]->mtu > ETH_DATA_LEN)
 964		reg |= GM_SMOD_JUMBO_ENA;
 965
 966	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
 967	    hw->chip_rev == CHIP_REV_YU_EC_U_B1)
 968		reg |= GM_NEW_FLOW_CTRL;
 969
 970	gma_write16(hw, port, GM_SERIAL_MODE, reg);
 971
 972	/* virtual address for data */
 973	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
 974
 975	/* physical address: used for pause frames */
 976	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
 977
 978	/* ignore counter overflows */
 979	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
 980	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
 981	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
 982
 983	/* Configure Rx MAC FIFO */
 984	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
 985	rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 986	if (hw->chip_id == CHIP_ID_YUKON_EX ||
 987	    hw->chip_id == CHIP_ID_YUKON_FE_P)
 988		rx_reg |= GMF_RX_OVER_ON;
 989
 990	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
 991
 992	if (hw->chip_id == CHIP_ID_YUKON_XL) {
 993		/* Hardware errata - clear flush mask */
 994		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
 995	} else {
 996		/* Flush Rx MAC FIFO on any flow control or error */
 997		sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
 998	}
 999
1000	/* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug  */
1001	reg = RX_GMF_FL_THR_DEF + 1;
1002	/* Another magic mystery workaround from sk98lin */
1003	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
1004	    hw->chip_rev == CHIP_REV_YU_FE2_A0)
1005		reg = 0x178;
1006	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
1007
1008	/* Configure Tx MAC FIFO */
1009	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1010	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1011
1012	/* On chips without ram buffer, pause is controlled by MAC level */
1013	if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
1014		/* Pause threshold is scaled by 8 in bytes */
1015		if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
1016		    hw->chip_rev == CHIP_REV_YU_FE2_A0)
1017			reg = 1568 / 8;
1018		else
1019			reg = 1024 / 8;
1020		sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
1021		sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
1022
1023		sky2_set_tx_stfwd(hw, port);
1024	}
1025
1026	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
1027	    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
1028		/* disable dynamic watermark */
1029		reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
1030		reg &= ~TX_DYN_WM_ENA;
1031		sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
1032	}
1033}
1034
/* Assign Ram Buffer allocation to queue.
 * @q:     queue register block (Q_R1/Q_R2 are the receive queues)
 * @start: start offset of the region, in Kbytes
 * @space: size of the region, in Kbytes
 */
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
{
	u32 end;

	/* convert from K bytes to qwords used for hw register */
	start *= 1024/8;
	space *= 1024/8;
	end = start + space - 1;

	/* Take the RAM buffer out of reset and program its window */
	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	sky2_write32(hw, RB_ADDR(q, RB_START), start);
	sky2_write32(hw, RB_ADDR(q, RB_END), end);
	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
	sky2_write32(hw, RB_ADDR(q, RB_RP), start);

	if (q == Q_R1 || q == Q_R2) {
		u32 tp = space - space/4;

		/* On receive queue's set the thresholds
		 * give receiver priority when > 3/4 full
		 * send pause when down to 2K
		 */
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);

		tp = space - 8192/8;
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 1K on Yukon
		 */
		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	/* Enable operation; read back to flush the posted writes */
	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
	sky2_read8(hw, RB_ADDR(q, RB_CTRL));
}
1074
/* Setup Bus Memory Interface (BMU) for a queue: clear reset,
 * initialize the state machine, enable the FIFO and program the
 * default watermark.
 */
static void sky2_qset(struct sky2_hw *hw, u16 q)
{
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
	sky2_write32(hw, Q_ADDR(q, Q_WM),  BMU_WM_DEFAULT);
}
1083
/* Setup prefetch unit registers. This is the interface between
 * hardware and driver list elements.
 * @qaddr: queue address block
 * @addr:  bus address of the ring of list elements
 * @last:  index of the last element in the ring
 */
static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
			       dma_addr_t addr, u32 last)
{
	/* Pulse reset, then program the ring base address and last index */
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);

	/* Read back to flush posted writes */
	sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
}
1099
1100static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1101{
1102	struct sky2_tx_le *le = sky2->tx_le + *slot;
1103
1104	*slot = RING_NEXT(*slot, sky2->tx_ring_size);
1105	le->ctrl = 0;
1106	return le;
1107}
1108
1109static void tx_init(struct sky2_port *sky2)
1110{
1111	struct sky2_tx_le *le;
1112
1113	sky2->tx_prod = sky2->tx_cons = 0;
1114	sky2->tx_tcpsum = 0;
1115	sky2->tx_last_mss = 0;
1116	netdev_reset_queue(sky2->netdev);
1117
1118	le = get_tx_le(sky2, &sky2->tx_prod);
1119	le->addr = 0;
1120	le->opcode = OP_ADDR64 | HW_OWNER;
1121	sky2->tx_last_upper = 0;
1122}
1123
/* Update chip's next pointer (producer index) for queue @q */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
	/* Make sure writes to descriptors are complete before we tell hardware */
	wmb();
	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
}
1131
1132
1133static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
1134{
1135	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
1136	sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
1137	le->ctrl = 0;
1138	return le;
1139}
1140
1141static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
1142{
1143	unsigned size;
1144
1145	/* Space needed for frame data + headers rounded up */
1146	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1147
1148	/* Stopping point for hardware truncation */
1149	return (size - 8) / sizeof(u32);
1150}
1151
1152static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
1153{
1154	struct rx_ring_info *re;
1155	unsigned size;
1156
1157	/* Space needed for frame data + headers rounded up */
1158	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1159
1160	sky2->rx_nfrags = size >> PAGE_SHIFT;
1161	BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1162
1163	/* Compute residue after pages */
1164	size -= sky2->rx_nfrags << PAGE_SHIFT;
1165
1166	/* Optimize to handle small packets and headers */
1167	if (size < copybreak)
1168		size = copybreak;
1169	if (size < ETH_HLEN)
1170		size = ETH_HLEN;
1171
1172	return size;
1173}
1174
/* Build description to hardware for one receive segment.
 * @op:  list element opcode (OP_PACKET for the linear part,
 *       OP_BUFFER for page fragments)
 * @map: bus address of the segment
 * @len: segment length in bytes
 */
static void sky2_rx_add(struct sky2_port *sky2, u8 op,
			dma_addr_t map, unsigned len)
{
	struct sky2_rx_le *le;

	/* 64-bit DMA platforms need a leading element that carries
	 * the upper 32 bits of the buffer address.
	 */
	if (sizeof(dma_addr_t) > sizeof(u32)) {
		le = sky2_next_rx(sky2);
		le->addr = cpu_to_le32(upper_32_bits(map));
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	/* Data element: low address bits, length and requested opcode */
	le = sky2_next_rx(sky2);
	le->addr = cpu_to_le32(lower_32_bits(map));
	le->length = cpu_to_le16(len);
	le->opcode = op | HW_OWNER;
}
1192
1193/* Build description to hardware for one possibly fragmented skb */
1194static void sky2_rx_submit(struct sky2_port *sky2,
1195			   const struct rx_ring_info *re)
1196{
1197	int i;
1198
1199	sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
1200
1201	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
1202		sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
1203}
1204
1205
/* DMA-map the linear part and all page fragments of a receive skb.
 * On any mapping failure everything mapped so far is unwound.
 * Returns 0 on success or -EIO on mapping error.
 */
static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
			    unsigned size)
{
	struct sk_buff *skb = re->skb;
	int i;

	re->data_addr = dma_map_single(&pdev->dev, skb->data, size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, re->data_addr))
		goto mapping_error;

	/* remember the size so unmap can use it later */
	dma_unmap_len_set(re, data_size, size);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
						    skb_frag_size(frag),
						    DMA_FROM_DEVICE);

		if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
			goto map_page_error;
	}
	return 0;

map_page_error:
	/* Unwind the fragments mapped before the failure */
	while (--i >= 0) {
		dma_unmap_page(&pdev->dev, re->frag_addr[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_FROM_DEVICE);
	}

	dma_unmap_single(&pdev->dev, re->data_addr,
			 dma_unmap_len(re, data_size), DMA_FROM_DEVICE);

mapping_error:
	if (net_ratelimit())
		dev_warn(&pdev->dev, "%s: rx mapping error\n",
			 skb->dev->name);
	return -EIO;
}
1247
/* Undo the DMA mappings created by sky2_rx_map_skb() */
static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
{
	struct sk_buff *skb = re->skb;
	int i;

	dma_unmap_single(&pdev->dev, re->data_addr,
			 dma_unmap_len(re, data_size), DMA_FROM_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, re->frag_addr[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_FROM_DEVICE);
}
1261
/* Tell chip where to start receive checksum.
 * Actually has two checksums, but set both same to avoid possible byte
 * order problems.
 */
static void rx_set_checksum(struct sky2_port *sky2)
{
	struct sky2_rx_le *le = sky2_next_rx(sky2);

	/* both checksum start offsets set to end of Ethernet header */
	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
	le->ctrl = 0;
	le->opcode = OP_TCPSTART | HW_OWNER;

	/* enable or disable checksum offload per the current features */
	sky2_write32(sky2->hw,
		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     (sky2->netdev->features & NETIF_F_RXCSUM)
		     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
1279
/* Enable/disable receive hash calculation (RSS) */
static void rx_set_rss(struct net_device *dev, netdev_features_t features)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int i, nkeys = 4;

	/* Newer list-element format supports IPv6 and other hash modes,
	 * and uses a larger (10 word) key.
	 */
	if (hw->flags & SKY2_HW_NEW_LE) {
		nkeys = 10;
		sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
	}

	/* Program RSS initial values */
	if (features & NETIF_F_RXHASH) {
		u32 rss_key[10];

		netdev_rss_key_fill(rss_key, sizeof(rss_key));
		for (i = 0; i < nkeys; i++)
			sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
				     rss_key[i]);

		/* Need to turn on (undocumented) flag to make hashing work  */
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
			     RX_STFW_ENA);

		sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
			     BMU_ENA_RX_RSS_HASH);
	} else
		sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
			     BMU_DIS_RX_RSS_HASH);
}
1312
1313/*
1314 * The RX Stop command will not work for Yukon-2 if the BMU does not
1315 * reach the end of packet and since we can't make sure that we have
1316 * incoming data, we must reset the BMU while it is not doing a DMA
1317 * transfer. Since it is possible that the RX path is still active,
1318 * the RX RAM buffer will be stopped first, so any possible incoming
1319 * data will not trigger a DMA. After the RAM buffer is stopped, the
1320 * BMU is polled until any DMA in progress is ended and only then it
1321 * will be reset.
1322 */
1323static void sky2_rx_stop(struct sky2_port *sky2)
1324{
1325	struct sky2_hw *hw = sky2->hw;
1326	unsigned rxq = rxqaddr[sky2->port];
1327	int i;
1328
1329	/* disable the RAM Buffer receive queue */
1330	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
1331
1332	for (i = 0; i < 0xffff; i++)
1333		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
1334		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
1335			goto stopped;
1336
1337	netdev_warn(sky2->netdev, "receiver stop failed\n");
1338stopped:
1339	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
1340
1341	/* reset the Rx prefetch unit */
1342	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1343}
1344
1345/* Clean out receive buffer area, assumes receiver hardware stopped */
1346static void sky2_rx_clean(struct sky2_port *sky2)
1347{
1348	unsigned i;
1349
1350	if (sky2->rx_le)
1351		memset(sky2->rx_le, 0, RX_LE_BYTES);
1352
1353	for (i = 0; i < sky2->rx_pending; i++) {
1354		struct rx_ring_info *re = sky2->rx_ring + i;
1355
1356		if (re->skb) {
1357			sky2_rx_unmap_skb(sky2->hw->pdev, re);
1358			kfree_skb(re->skb);
1359			re->skb = NULL;
1360		}
1361	}
1362}
1363
/* Basic MII support: SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls.
 * Returns -ENODEV while the interface is down (PHY still in reset)
 * and -EOPNOTSUPP for unrecognized commands.
 */
static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR_MARV;

		fallthrough;
	case SIOCGMIIREG: {
		u16 val = 0;

		/* PHY register access is serialized by phy_lock */
		spin_lock_bh(&sky2->phy_lock);
		err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&sky2->phy_lock);

		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		spin_lock_bh(&sky2->phy_lock);
		err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
				   data->val_in);
		spin_unlock_bh(&sky2->phy_lock);
		break;
	}
	return err;
}
1400
1401#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
1402
1403static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
1404{
1405	struct sky2_port *sky2 = netdev_priv(dev);
1406	struct sky2_hw *hw = sky2->hw;
1407	u16 port = sky2->port;
1408
1409	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1410		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1411			     RX_VLAN_STRIP_ON);
1412	else
1413		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1414			     RX_VLAN_STRIP_OFF);
1415
1416	if (features & NETIF_F_HW_VLAN_CTAG_TX) {
1417		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1418			     TX_VLAN_TAG_ON);
1419
1420		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
1421	} else {
1422		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1423			     TX_VLAN_TAG_OFF);
1424
1425		/* Can't do transmit offload of vlan without hw vlan */
1426		dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
1427	}
1428}
1429
1430/* Amount of required worst case padding in rx buffer */
1431static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1432{
1433	return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
1434}
1435
1436/*
1437 * Allocate an skb for receiving. If the MTU is large enough
1438 * make the skb non-linear with a fragment list of pages.
1439 */
1440static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
1441{
1442	struct sk_buff *skb;
1443	int i;
1444
1445	skb = __netdev_alloc_skb(sky2->netdev,
1446				 sky2->rx_data_size + sky2_rx_pad(sky2->hw),
1447				 gfp);
1448	if (!skb)
1449		goto nomem;
1450
1451	if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
1452		unsigned char *start;
1453		/*
1454		 * Workaround for a bug in FIFO that cause hang
1455		 * if the FIFO if the receive buffer is not 64 byte aligned.
1456		 * The buffer returned from netdev_alloc_skb is
1457		 * aligned except if slab debugging is enabled.
1458		 */
1459		start = PTR_ALIGN(skb->data, 8);
1460		skb_reserve(skb, start - skb->data);
1461	} else
1462		skb_reserve(skb, NET_IP_ALIGN);
1463
1464	for (i = 0; i < sky2->rx_nfrags; i++) {
1465		struct page *page = alloc_page(gfp);
1466
1467		if (!page)
1468			goto free_partial;
1469		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
1470	}
1471
1472	return skb;
1473free_partial:
1474	kfree_skb(skb);
1475nomem:
1476	return NULL;
1477}
1478
/* Tell hardware about the receive buffers queued so far */
static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
{
	sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}
1483
/* Allocate and DMA-map an skb for every receive ring entry.
 * Returns 0 or -ENOMEM; on failure, entries allocated so far are left
 * in place for sky2_rx_clean() to release.
 */
static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned i;

	sky2->rx_data_size = sky2_get_rx_data_size(sky2);

	/* Fill Rx ring */
	for (i = 0; i < sky2->rx_pending; i++) {
		struct rx_ring_info *re = sky2->rx_ring + i;

		re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
		if (!re->skb)
			return -ENOMEM;

		if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
			dev_kfree_skb(re->skb);
			re->skb = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}
1507
1508/*
1509 * Setup receiver buffer pool.
1510 * Normal case this ends up creating one list element for skb
1511 * in the receive ring. Worst case if using large MTU and each
1512 * allocation falls on a different 64 bit region, that results
1513 * in 6 list elements per ring entry.
1514 * One element is used for checksum enable/disable, and one
1515 * extra to avoid wrap.
1516 */
1517static void sky2_rx_start(struct sky2_port *sky2)
1518{
1519	struct sky2_hw *hw = sky2->hw;
1520	struct rx_ring_info *re;
1521	unsigned rxq = rxqaddr[sky2->port];
1522	unsigned i, thresh;
1523
1524	sky2->rx_put = sky2->rx_next = 0;
1525	sky2_qset(hw, rxq);
1526
1527	/* On PCI express lowering the watermark gives better performance */
1528	if (pci_is_pcie(hw->pdev))
1529		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
1530
1531	/* These chips have no ram buffer?
1532	 * MAC Rx RAM Read is controlled by hardware
1533	 */
1534	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1535	    hw->chip_rev > CHIP_REV_YU_EC_U_A0)
1536		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1537
1538	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1539
1540	if (!(hw->flags & SKY2_HW_NEW_LE))
1541		rx_set_checksum(sky2);
1542
1543	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
1544		rx_set_rss(sky2->netdev, sky2->netdev->features);
1545
1546	/* submit Rx ring */
1547	for (i = 0; i < sky2->rx_pending; i++) {
1548		re = sky2->rx_ring + i;
1549		sky2_rx_submit(sky2, re);
1550	}
1551
1552	/*
1553	 * The receiver hangs if it receives frames larger than the
1554	 * packet buffer. As a workaround, truncate oversize frames, but
1555	 * the register is limited to 9 bits, so if you do frames > 2052
1556	 * you better get the MTU right!
1557	 */
1558	thresh = sky2_get_rx_threshold(sky2);
1559	if (thresh > 0x1ff)
1560		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1561	else {
1562		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1563		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1564	}
1565
1566	/* Tell chip about available buffers */
1567	sky2_rx_update(sky2, rxq);
1568
1569	if (hw->chip_id == CHIP_ID_YUKON_EX ||
1570	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
1571		/*
1572		 * Disable flushing of non ASF packets;
1573		 * must be done after initializing the BMUs;
1574		 * drivers without ASF support should do this too, otherwise
1575		 * it may happen that they cannot run on ASF devices;
1576		 * remember that the MAC FIFO isn't reset during initialization.
1577		 */
1578		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
1579	}
1580
1581	if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
1582		/* Enable RX Home Address & Routing Header checksum fix */
1583		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
1584			     RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
1585
1586		/* Enable TX Home Address & Routing Header checksum fix */
1587		sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
1588			     TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
1589	}
1590}
1591
/* Allocate transmit/receive list element rings, the driver-side ring
 * bookkeeping arrays, and the receive skbs. Returns 0 or -ENOMEM;
 * on failure the caller is expected to run sky2_free_buffers().
 */
static int sky2_alloc_buffers(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;

	/* must be power of 2 */
	sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev,
					 sky2->tx_ring_size * sizeof(struct sky2_tx_le),
					 &sky2->tx_le_map, GFP_KERNEL);
	if (!sky2->tx_le)
		goto nomem;

	sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
				GFP_KERNEL);
	if (!sky2->tx_ring)
		goto nomem;

	sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES,
					 &sky2->rx_le_map, GFP_KERNEL);
	if (!sky2->rx_le)
		goto nomem;

	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
				GFP_KERNEL);
	if (!sky2->rx_ring)
		goto nomem;

	return sky2_alloc_rx_skbs(sky2);
nomem:
	return -ENOMEM;
}
1622
/* Release everything allocated by sky2_alloc_buffers(); tolerates
 * partially-allocated state (NULL pointers are skipped).
 */
static void sky2_free_buffers(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;

	/* free receive skbs and their DMA mappings first */
	sky2_rx_clean(sky2);

	if (sky2->rx_le) {
		dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le,
				  sky2->rx_le_map);
		sky2->rx_le = NULL;
	}
	if (sky2->tx_le) {
		dma_free_coherent(&hw->pdev->dev,
				  sky2->tx_ring_size * sizeof(struct sky2_tx_le),
				  sky2->tx_le, sky2->tx_le_map);
		sky2->tx_le = NULL;
	}
	kfree(sky2->tx_ring);
	kfree(sky2->rx_ring);

	sky2->tx_ring = NULL;
	sky2->rx_ring = NULL;
}
1646
/* Bring the hardware for one port up: MAC, RAM buffer partitioning,
 * transmit queue/prefetch units and the receiver.
 */
static void sky2_hw_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 ramsize;
	int cap;
	struct net_device *otherdev = hw->dev[sky2->port^1];

	tx_init(sky2);

	/*
	 * On dual port PCI-X card, there is an problem where status
	 * can be received out of order due to split transactions
	 */
	if (otherdev && netif_running(otherdev) &&
	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
		u16 cmd;

		/* disable PCI-X split transactions as a workaround */
		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
		cmd &= ~PCI_X_CMD_MAX_SPLIT;
		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
	}

	sky2_mac_init(hw, port);

	/* Register is number of 4K blocks on internal RAM buffer. */
	ramsize = sky2_read8(hw, B2_E_0) * 4;
	if (ramsize > 0) {
		u32 rxspace;

		netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
		/* split RAM between rx and tx; rx gets the larger share
		 * on big buffers
		 */
		if (ramsize < 16)
			rxspace = ramsize / 2;
		else
			rxspace = 8 + (2*(ramsize - 16))/3;

		sky2_ramset(hw, rxqaddr[port], 0, rxspace);
		sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);

		/* Make sure SyncQ is disabled */
		sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
			    RB_RST_SET);
	}

	sky2_qset(hw, txqaddr[port]);

	/* This is copied from sk98lin 10.0.5.3; no one tells me about erratta's */
	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);

	/* Set almost empty threshold */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    hw->chip_rev == CHIP_REV_YU_EC_U_A0)
		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);

	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   sky2->tx_ring_size - 1);

	sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
	netdev_update_features(sky2->netdev);

	sky2_rx_start(sky2);
}
1710
/* Setup device IRQ and enable napi to process.
 * Returns 0 on success or the request_irq() error code.
 */
static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
{
	struct pci_dev *pdev = hw->pdev;
	int err;

	/* IRQ is only shareable in legacy (non-MSI) mode */
	err = request_irq(pdev->irq, sky2_intr,
			  (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
			  name, hw);
	if (err)
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
	else {
		hw->flags |= SKY2_HW_IRQ_SETUP;

		napi_enable(&hw->napi);
		sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
		sky2_read32(hw, B0_IMSK);	/* flush posted write */
	}

	return err;
}
1732
1733
/* Bring up network interface (ndo_open).
 * Allocates rings and buffers, sets up the IRQ (single-port devices),
 * programs the hardware and unmasks the port's interrupts.
 */
static int sky2_open(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 imask;
	int err;

	netif_carrier_off(dev);

	err = sky2_alloc_buffers(sky2);
	if (err)
		goto err_out;

	/* With single port, IRQ is setup when device is brought up */
	if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
		goto err_out;

	sky2_hw_up(sky2);

	/* Enable interrupts from phy/mac for port */
	imask = sky2_read32(hw, B0_IMSK);

	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
	    hw->chip_id == CHIP_ID_YUKON_PRM ||
	    hw->chip_id == CHIP_ID_YUKON_OP_2)
		imask |= Y2_IS_PHY_QLNK;	/* enable PHY Quick Link */

	imask |= portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);
	sky2_read32(hw, B0_IMSK);	/* flush posted write */

	netif_info(sky2, ifup, dev, "enabling interface\n");

	return 0;

err_out:
	sky2_free_buffers(sky2);
	return err;
}
1775
1776/* Modular subtraction in ring */
1777static inline int tx_inuse(const struct sky2_port *sky2)
1778{
1779	return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1);
1780}
1781
1782/* Number of list elements available for next tx */
1783static inline int tx_avail(const struct sky2_port *sky2)
1784{
1785	return sky2->tx_pending - tx_inuse(sky2);
1786}
1787
1788/* Estimate of number of transmit list elements required */
1789static unsigned tx_le_req(const struct sk_buff *skb)
1790{
1791	unsigned count;
1792
1793	count = (skb_shinfo(skb)->nr_frags + 1)
1794		* (sizeof(dma_addr_t) / sizeof(u32));
1795
1796	if (skb_is_gso(skb))
1797		++count;
1798	else if (sizeof(dma_addr_t) == sizeof(u32))
1799		++count;	/* possible vlan */
1800
1801	if (skb->ip_summed == CHECKSUM_PARTIAL)
1802		++count;
1803
1804	return count;
1805}
1806
/* Undo the DMA mapping of one transmit ring entry, according to how it
 * was mapped (single buffer vs. page fragment); clears re->flags so a
 * second call is a no-op.
 */
static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
	if (re->flags & TX_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr),
				 dma_unmap_len(re, maplen), DMA_TO_DEVICE);
	else if (re->flags & TX_MAP_PAGE)
		dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr),
			       dma_unmap_len(re, maplen), DMA_TO_DEVICE);
	re->flags = 0;
}
1817
1818/*
1819 * Put one packet in ring for transmit.
1820 * A single packet can generate multiple list elements, and
1821 * the number of ring elements will probably be less than the number
1822 * of list elements used.
1823 */
1824static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1825				   struct net_device *dev)
1826{
1827	struct sky2_port *sky2 = netdev_priv(dev);
1828	struct sky2_hw *hw = sky2->hw;
1829	struct sky2_tx_le *le = NULL;
1830	struct tx_ring_info *re;
1831	unsigned i, len;
1832	dma_addr_t mapping;
1833	u32 upper;
1834	u16 slot;
1835	u16 mss;
1836	u8 ctrl;
1837
1838	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1839		return NETDEV_TX_BUSY;
1840
1841	len = skb_headlen(skb);
1842	mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
1843				 DMA_TO_DEVICE);
1844
1845	if (dma_mapping_error(&hw->pdev->dev, mapping))
1846		goto mapping_error;
1847
1848	slot = sky2->tx_prod;
1849	netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
1850		     "tx queued, slot %u, len %d\n", slot, skb->len);
1851
1852	/* Send high bits if needed */
1853	upper = upper_32_bits(mapping);
1854	if (upper != sky2->tx_last_upper) {
1855		le = get_tx_le(sky2, &slot);
1856		le->addr = cpu_to_le32(upper);
1857		sky2->tx_last_upper = upper;
1858		le->opcode = OP_ADDR64 | HW_OWNER;
1859	}
1860
1861	/* Check for TCP Segmentation Offload */
1862	mss = skb_shinfo(skb)->gso_size;
1863	if (mss != 0) {
1864
1865		if (!(hw->flags & SKY2_HW_NEW_LE))
1866			mss += skb_tcp_all_headers(skb);
1867
1868		if (mss != sky2->tx_last_mss) {
1869			le = get_tx_le(sky2, &slot);
1870			le->addr = cpu_to_le32(mss);
1871
1872			if (hw->flags & SKY2_HW_NEW_LE)
1873				le->opcode = OP_MSS | HW_OWNER;
1874			else
1875				le->opcode = OP_LRGLEN | HW_OWNER;
1876			sky2->tx_last_mss = mss;
1877		}
1878	}
1879
1880	ctrl = 0;
1881
1882	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1883	if (skb_vlan_tag_present(skb)) {
1884		if (!le) {
1885			le = get_tx_le(sky2, &slot);
1886			le->addr = 0;
1887			le->opcode = OP_VLAN|HW_OWNER;
1888		} else
1889			le->opcode |= OP_VLAN;
1890		le->length = cpu_to_be16(skb_vlan_tag_get(skb));
1891		ctrl |= INS_VLAN;
1892	}
1893
1894	/* Handle TCP checksum offload */
1895	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1896		/* On Yukon EX (some versions) encoding change. */
1897		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
1898			ctrl |= CALSUM;	/* auto checksum */
1899		else {
1900			const unsigned offset = skb_transport_offset(skb);
1901			u32 tcpsum;
1902
1903			tcpsum = offset << 16;			/* sum start */
1904			tcpsum |= offset + skb->csum_offset;	/* sum write */
1905
1906			ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1907			if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1908				ctrl |= UDPTCP;
1909
1910			if (tcpsum != sky2->tx_tcpsum) {
1911				sky2->tx_tcpsum = tcpsum;
1912
1913				le = get_tx_le(sky2, &slot);
1914				le->addr = cpu_to_le32(tcpsum);
1915				le->length = 0;	/* initial checksum value */
1916				le->ctrl = 1;	/* one packet */
1917				le->opcode = OP_TCPLISW | HW_OWNER;
1918			}
1919		}
1920	}
1921
1922	re = sky2->tx_ring + slot;
1923	re->flags = TX_MAP_SINGLE;
1924	dma_unmap_addr_set(re, mapaddr, mapping);
1925	dma_unmap_len_set(re, maplen, len);
1926
1927	le = get_tx_le(sky2, &slot);
1928	le->addr = cpu_to_le32(lower_32_bits(mapping));
1929	le->length = cpu_to_le16(len);
1930	le->ctrl = ctrl;
1931	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1932
1933
1934	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1935		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1936
1937		mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
1938					   skb_frag_size(frag), DMA_TO_DEVICE);
1939
1940		if (dma_mapping_error(&hw->pdev->dev, mapping))
1941			goto mapping_unwind;
1942
1943		upper = upper_32_bits(mapping);
1944		if (upper != sky2->tx_last_upper) {
1945			le = get_tx_le(sky2, &slot);
1946			le->addr = cpu_to_le32(upper);
1947			sky2->tx_last_upper = upper;
1948			le->opcode = OP_ADDR64 | HW_OWNER;
1949		}
1950
1951		re = sky2->tx_ring + slot;
1952		re->flags = TX_MAP_PAGE;
1953		dma_unmap_addr_set(re, mapaddr, mapping);
1954		dma_unmap_len_set(re, maplen, skb_frag_size(frag));
1955
1956		le = get_tx_le(sky2, &slot);
1957		le->addr = cpu_to_le32(lower_32_bits(mapping));
1958		le->length = cpu_to_le16(skb_frag_size(frag));
1959		le->ctrl = ctrl;
1960		le->opcode = OP_BUFFER | HW_OWNER;
1961	}
1962
1963	re->skb = skb;
1964	le->ctrl |= EOP;
1965
1966	sky2->tx_prod = slot;
1967
1968	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
1969		netif_stop_queue(dev);
1970
1971	netdev_sent_queue(dev, skb->len);
1972	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1973
1974	return NETDEV_TX_OK;
1975
1976mapping_unwind:
1977	for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
1978		re = sky2->tx_ring + i;
1979
1980		sky2_tx_unmap(hw->pdev, re);
1981	}
1982
1983mapping_error:
1984	if (net_ratelimit())
1985		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
1986	dev_kfree_skb_any(skb);
1987	return NETDEV_TX_OK;
1988}
1989
1990/*
1991 * Free ring elements from starting at tx_cons until "done"
1992 *
1993 * NB:
1994 *  1. The hardware will tell us about partial completion of multi-part
1995 *     buffers so make sure not to free skb to early.
1996 *  2. This may run in parallel start_xmit because the it only
1997 *     looks at the tail of the queue of FIFO (tx_cons), not
1998 *     the head (tx_prod)
1999 */
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
	struct net_device *dev = sky2->netdev;
	u16 idx;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	BUG_ON(done >= sky2->tx_ring_size);

	for (idx = sky2->tx_cons; idx != done;
	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
		struct tx_ring_info *re = sky2->tx_ring + idx;
		struct sk_buff *skb = re->skb;

		sky2_tx_unmap(sky2->hw->pdev, re);

		/* Only the last element of a multi-part frame carries the
		 * skb; intermediate elements only needed their DMA mapping
		 * torn down above.
		 */
		if (skb) {
			netif_printk(sky2, tx_done, KERN_DEBUG, dev,
				     "tx done %u\n", idx);

			pkts_compl++;
			bytes_compl += skb->len;

			re->skb = NULL;
			dev_kfree_skb_any(skb);

			/* slot just past the last fully completed frame */
			sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
		}
	}

	sky2->tx_cons = idx;
	/* make the tx_cons update visible before subsequent ring-space
	 * checks — presumably paired with a barrier in the xmit path;
	 * verify when touching this ordering
	 */
	smp_mb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	u64_stats_update_begin(&sky2->tx_stats.syncp);
	sky2->tx_stats.packets += pkts_compl;
	sky2->tx_stats.bytes += bytes_compl;
	u64_stats_update_end(&sky2->tx_stats.syncp);
}
2039
/* Reset the transmit path of one port: Tx arbiter, BMU FIFO,
 * prefetch unit, RAM buffer and GMAC Tx FIFO.
 */
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
{
	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);

	/* dummy read — presumably flushes the posted writes above; verify */
	sky2_read32(hw, B0_CTST);
}
2063
/* Take one port's hardware down: turn off flow control, stop the
 * transmitter and receiver, reset the PHY and GMAC, then reset the
 * Tx path and reclaim anything still queued on the Tx ring.
 */
static void sky2_hw_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;

	/* Force flow control off */
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	/* Disable both MAC Rx and Tx enables */
	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
	      port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Force any delayed status interrupt and NAPI */
	sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
	sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
	sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
	sky2_read8(hw, STAT_ISR_TIMER_CTRL);

	sky2_rx_stop(sky2);

	/* PHY register accesses are serialized by phy_lock */
	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_power_down(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	sky2_tx_reset(hw, port);

	/* Free any pending frames stuck in HW queue */
	sky2_tx_complete(sky2, sky2->tx_prod);
}
2110
/* Network shutdown (ndo_stop): mask interrupts, quiesce the port's
 * hardware and release the ring buffers.  Always returns 0.
 */
static int sky2_close(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	/* Never really got started! */
	if (!sky2->tx_le)
		return 0;

	netif_info(sky2, ifdown, dev, "disabling interface\n");

	if (hw->ports == 1) {
		/* Only port: mask everything and release the IRQ entirely */
		sky2_write32(hw, B0_IMSK, 0);
		sky2_read32(hw, B0_IMSK);

		napi_disable(&hw->napi);
		free_irq(hw->pdev->irq, hw);
		hw->flags &= ~SKY2_HW_IRQ_SETUP;
	} else {
		u32 imask;

		/* Disable port IRQ */
		imask  = sky2_read32(hw, B0_IMSK);
		imask &= ~portirq_msk[sky2->port];
		sky2_write32(hw, B0_IMSK, imask);
		sky2_read32(hw, B0_IMSK);

		/* wait out any handler/poll still running for this port */
		synchronize_irq(hw->pdev->irq);
		napi_synchronize(&hw->napi);
	}

	sky2_hw_down(sky2);

	sky2_free_buffers(sky2);

	return 0;
}
2149
2150static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
2151{
2152	if (hw->flags & SKY2_HW_FIBRE_PHY)
2153		return SPEED_1000;
2154
2155	if (!(hw->flags & SKY2_HW_GIGABIT)) {
2156		if (aux & PHY_M_PS_SPEED_100)
2157			return SPEED_100;
2158		else
2159			return SPEED_10;
2160	}
2161
2162	switch (aux & PHY_M_PS_SPEED_MSK) {
2163	case PHY_M_PS_SPEED_1000:
2164		return SPEED_1000;
2165	case PHY_M_PS_SPEED_100:
2166		return SPEED_100;
2167	default:
2168		return SPEED_10;
2169	}
2170}
2171
/* Bring the link up: program the inter-packet gap, enable Rx/Tx,
 * unmask PHY interrupts, mark carrier on, light the link LED and log
 * the negotiated parameters.
 */
static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	static const char *fc_name[] = {
		[FC_NONE]	= "none",
		[FC_TX]		= "tx",
		[FC_RX]		= "rx",
		[FC_BOTH]	= "both",
	};

	sky2_set_ipg(sky2);

	sky2_enable_rx_tx(sky2);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);

	/* kick the watchdog so it runs again right away */
	mod_timer(&hw->watchdog_timer, jiffies + 1);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	netif_info(sky2, link, sky2->netdev,
		   "Link is up at %d Mbps, %s duplex, flow control %s\n",
		   sky2->speed,
		   sky2->duplex == DUPLEX_FULL ? "full" : "half",
		   fc_name[sky2->flow_status]);
}
2203
/* Handle link loss: mask PHY interrupts, disable MAC Rx/Tx, mark
 * carrier off, turn the link LED off, and reinitialize the PHY so a
 * fresh negotiation can start.
 */
static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);

	netif_carrier_off(sky2->netdev);

	/* Turn off link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	netif_info(sky2, link, sky2->netdev, "Link is down\n");

	sky2_phy_init(hw, port);
}
2225
2226static enum flow_control sky2_flow(int rx, int tx)
2227{
2228	if (rx)
2229		return tx ? FC_BOTH : FC_RX;
2230	else
2231		return tx ? FC_TX : FC_NONE;
2232}
2233
/* Process autonegotiation completion.
 *
 * Decodes speed/duplex from the PHY auxiliary status and resolves the
 * pause (flow control) outcome from the local advertisement and link
 * partner ability registers.  Returns 0 on success, -1 if the partner
 * reported a fault or speed/duplex did not resolve.
 */
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 advert, lpa;

	advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
	lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
	if (lpa & PHY_M_AN_RF) {
		netdev_err(sky2->netdev, "remote fault\n");
		return -1;
	}

	if (!(aux & PHY_M_PS_SPDUP_RES)) {
		netdev_err(sky2->netdev, "speed/duplex mismatch\n");
		return -1;
	}

	sky2->speed = sky2_phy_speed(hw, aux);
	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	/* The pause result bits seem to be in different positions on
	 * different chips, so look at the advertisement/partner
	 * registers instead.
	 */
	if (hw->flags & SKY2_HW_FIBRE_PHY) {
		/* Remap the fiber-PHY pause bits onto the copper bit
		 * positions so the resolution logic below is shared.
		 */
		advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
		lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);

		if (advert & ADVERTISE_1000XPAUSE)
			advert |= ADVERTISE_PAUSE_CAP;
		if (advert & ADVERTISE_1000XPSE_ASYM)
			advert |= ADVERTISE_PAUSE_ASYM;
		if (lpa & LPA_1000XPAUSE)
			lpa |= LPA_PAUSE_CAP;
		if (lpa & LPA_1000XPAUSE_ASYM)
			lpa |= LPA_PAUSE_ASYM;
	}

	/* Resolve pause mode from the combined capability bits */
	sky2->flow_status = FC_NONE;
	if (advert & ADVERTISE_PAUSE_CAP) {
		if (lpa & LPA_PAUSE_CAP)
			sky2->flow_status = FC_BOTH;
		else if (advert & ADVERTISE_PAUSE_ASYM)
			sky2->flow_status = FC_RX;
	} else if (advert & ADVERTISE_PAUSE_ASYM) {
		if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
			sky2->flow_status = FC_TX;
	}

	/* No flow control at half duplex below gigabit, except on
	 * EC-U/EX chips.
	 */
	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
	    !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
		sky2->flow_status = FC_NONE;

	if (sky2->flow_status & FC_TX)
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
	else
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	return 0;
}
2295
/* Interrupt from PHY: read and clear the PHY interrupt status, then
 * react to autonegotiation completion or speed/duplex/link changes.
 */
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u16 istatus, phystat;

	if (!netif_running(dev))
		return;

	spin_lock(&sky2->phy_lock);
	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
		   istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		/* autoneg finished: bring the link up if it resolved */
		if (sky2_autoneg_done(sky2, phystat) == 0 &&
		    !netif_carrier_ok(dev))
			sky2_link_up(sky2);
		goto out;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		sky2->speed = sky2_phy_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		sky2->duplex =
		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			sky2_link_up(sky2);
		else
			sky2_link_down(sky2);
	}
out:
	spin_unlock(&sky2->phy_lock);
}
2336
/* Special quick link interrupt (Yukon-2 Optima only): mask the
 * source, reset PHY link detect, and declare the link up.
 */
static void sky2_qlink_intr(struct sky2_hw *hw)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
	u32 imask;
	u16 phy;

	/* disable irq */
	imask = sky2_read32(hw, B0_IMSK);
	imask &= ~Y2_IS_PHY_QLNK;
	sky2_write32(hw, B0_IMSK, imask);

	/* reset PHY Link Detect — the config register write is gated
	 * by TST_CFG_WRITE_ON/OFF
	 */
	phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	sky2_link_up(sky2);
}
2357
/* Transmit timeout is only called if we are running, carrier is up
 * and tx queue is full (stopped).  Logs ring state for diagnosis and
 * defers a full restart to process context.
 */
static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	netif_err(sky2, timer, dev, "tx timeout\n");

	/* dump software consumer/producer and hardware report/done indices */
	netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
		      sky2->tx_cons, sky2->tx_prod,
		      sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
		      sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));

	/* can't restart safely under softirq */
	schedule_work(&hw->restart_work);
}
2376
2377static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2378{
2379	struct sky2_port *sky2 = netdev_priv(dev);
2380	struct sky2_hw *hw = sky2->hw;
2381	unsigned port = sky2->port;
2382	int err;
2383	u16 ctl, mode;
2384	u32 imask;
2385
2386	if (!netif_running(dev)) {
2387		dev->mtu = new_mtu;
2388		netdev_update_features(dev);
2389		return 0;
2390	}
2391
2392	imask = sky2_read32(hw, B0_IMSK);
2393	sky2_write32(hw, B0_IMSK, 0);
2394	sky2_read32(hw, B0_IMSK);
2395
2396	netif_trans_update(dev);	/* prevent tx timeout */
2397	napi_disable(&hw->napi);
2398	netif_tx_disable(dev);
2399
2400	synchronize_irq(hw->pdev->irq);
2401
2402	if (!(hw->flags & SKY2_HW_RAM_BUFFER))
2403		sky2_set_tx_stfwd(hw, port);
2404
2405	ctl = gma_read16(hw, port, GM_GP_CTRL);
2406	gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
2407	sky2_rx_stop(sky2);
2408	sky2_rx_clean(sky2);
2409
2410	dev->mtu = new_mtu;
2411	netdev_update_features(dev);
2412
2413	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |	GM_SMOD_VLAN_ENA;
2414	if (sky2->speed > SPEED_100)
2415		mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
2416	else
2417		mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
2418
2419	if (dev->mtu > ETH_DATA_LEN)
2420		mode |= GM_SMOD_JUMBO_ENA;
2421
2422	gma_write16(hw, port, GM_SERIAL_MODE, mode);
2423
2424	sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
2425
2426	err = sky2_alloc_rx_skbs(sky2);
2427	if (!err)
2428		sky2_rx_start(sky2);
2429	else
2430		sky2_rx_clean(sky2);
2431	sky2_write32(hw, B0_IMSK, imask);
2432
2433	sky2_read32(hw, B0_Y2_SP_LISR);
2434	napi_enable(&hw->napi);
2435
2436	if (err)
2437		dev_close(dev);
2438	else {
2439		gma_write16(hw, port, GM_GP_CTRL, ctl);
2440
2441		netif_wake_queue(dev);
2442	}
2443
2444	return err;
2445}
2446
/* Decide whether a received frame should be copied into a fresh skb
 * rather than handing over the ring buffer: small frames (below
 * copybreak), or buffers whose IP header would be misaligned on
 * strict-alignment architectures.
 */
static inline bool needs_copy(const struct rx_ring_info *re,
			      unsigned length)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Some architectures need the IP header to be aligned */
	if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
		return true;
#endif
	return length < copybreak;
}
2457
/* For small just reuse existing skb for next receive: allocate a new
 * skb, sync the DMA buffer, copy the data and its checksum/hash/VLAN
 * metadata over, then scrub the metadata off the ring skb so it can
 * be reposted to the hardware.
 */
static struct sk_buff *receive_copy(struct sky2_port *sky2,
				    const struct rx_ring_info *re,
				    unsigned length)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
	if (likely(skb)) {
		dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr,
					length, DMA_FROM_DEVICE);
		skb_copy_from_linear_data(re->skb, skb->data, length);
		skb->ip_summed = re->skb->ip_summed;
		skb->csum = re->skb->csum;
		skb_copy_hash(skb, re->skb);
		__vlan_hwaccel_copy_tag(skb, re->skb);

		/* hand the ring buffer back to the device */
		dma_sync_single_for_device(&sky2->hw->pdev->dev,
					   re->data_addr, length,
					   DMA_FROM_DEVICE);
		/* clear stale metadata before the ring skb is reused */
		__vlan_hwaccel_clear_tag(re->skb);
		skb_clear_hash(re->skb);
		re->skb->ip_summed = CHECKSUM_NONE;
		skb_put(skb, length);
	}
	return skb;
}
2485
/* Adjust length of skb with fragments to match received data.
 *
 * The first hdr_space bytes live in the linear area; the remainder
 * fills successive page fragments.  Trailing pages that received no
 * data are released and dropped from nr_frags.
 */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			__skb_frag_unref(frag, false);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			skb_frag_size_set(frag, size);
			skb->data_len += size;
			/* whole page is charged even if partly filled */
			skb->truesize += PAGE_SIZE;
			skb->len += size;
			length -= size;
		}
	}
}
2518
/* Normal packet - take skb from ring element and put in a new one.
 *
 * A replacement buffer is allocated and mapped before the ring skb is
 * taken so the ring slot is never left empty; returns NULL (frame is
 * dropped) if allocation or mapping fails.
 */
static struct sk_buff *receive_new(struct sky2_port *sky2,
				   struct rx_ring_info *re,
				   unsigned int length)
{
	struct sk_buff *skb;
	struct rx_ring_info nre;
	unsigned hdr_space = sky2->rx_data_size;

	nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
	if (unlikely(!nre.skb))
		goto nobuf;

	if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
		goto nomap;

	skb = re->skb;
	sky2_rx_unmap_skb(sky2->hw->pdev, re);
	prefetch(skb->data);
	/* install the replacement buffer into the ring slot */
	*re = nre;

	if (skb_shinfo(skb)->nr_frags)
		skb_put_frags(skb, hdr_space, length);
	else
		skb_put(skb, length);
	return skb;

nomap:
	dev_kfree_skb(nre.skb);
nobuf:
	return NULL;
}
2551
2552/*
2553 * Receive one packet.
2554 * For larger packets, get new buffer.
2555 */
2556static struct sk_buff *sky2_receive(struct net_device *dev,
2557				    u16 length, u32 status)
2558{
2559	struct sky2_port *sky2 = netdev_priv(dev);
2560	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
2561	struct sk_buff *skb = NULL;
2562	u16 count = (status & GMR_FS_LEN) >> 16;
2563
2564	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2565		     "rx slot %u status 0x%x len %d\n",
2566		     sky2->rx_next, status, length);
2567
2568	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2569	prefetch(sky2->rx_ring + sky2->rx_next);
2570
2571	if (skb_vlan_tag_present(re->skb))
2572		count -= VLAN_HLEN;	/* Account for vlan tag */
2573
2574	/* This chip has hardware problems that generates bogus status.
2575	 * So do only marginal checking and expect higher level protocols
2576	 * to handle crap frames.
2577	 */
2578	if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2579	    sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2580	    length != count)
2581		goto okay;
2582
2583	if (status & GMR_FS_ANY_ERR)
2584		goto error;
2585
2586	if (!(status & GMR_FS_RX_OK))
2587		goto resubmit;
2588
2589	/* if length reported by DMA does not match PHY, packet was truncated */
2590	if (length != count)
2591		goto error;
2592
2593okay:
2594	if (needs_copy(re, length))
2595		skb = receive_copy(sky2, re, length);
2596	else
2597		skb = receive_new(sky2, re, length);
2598
2599	dev->stats.rx_dropped += (skb == NULL);
2600
2601resubmit:
2602	sky2_rx_submit(sky2, re);
2603
2604	return skb;
2605
2606error:
2607	++dev->stats.rx_errors;
2608
2609	if (net_ratelimit())
2610		netif_info(sky2, rx_err, dev,
2611			   "rx error, status 0x%x length %d\n", status, length);
2612
2613	goto resubmit;
2614}
2615
2616/* Transmit complete */
2617static inline void sky2_tx_done(struct net_device *dev, u16 last)
2618{
2619	struct sky2_port *sky2 = netdev_priv(dev);
2620
2621	if (netif_running(dev)) {
2622		sky2_tx_complete(sky2, last);
2623
2624		/* Wake unless it's detached, and called e.g. from sky2_close() */
2625		if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2626			netif_wake_queue(dev);
2627	}
2628}
2629
2630static inline void sky2_skb_rx(const struct sky2_port *sky2,
2631			       struct sk_buff *skb)
2632{
2633	if (skb->ip_summed == CHECKSUM_NONE)
2634		netif_receive_skb(skb);
2635	else
2636		napi_gro_receive(&sky2->hw->napi, skb);
2637}
2638
2639static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2640				unsigned packets, unsigned bytes)
2641{
2642	struct net_device *dev = hw->dev[port];
2643	struct sky2_port *sky2 = netdev_priv(dev);
2644
2645	if (packets == 0)
2646		return;
2647
2648	u64_stats_update_begin(&sky2->rx_stats.syncp);
2649	sky2->rx_stats.packets += packets;
2650	sky2->rx_stats.bytes += bytes;
2651	u64_stats_update_end(&sky2->rx_stats.syncp);
2652
2653	sky2->last_rx = jiffies;
2654	sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
2655}
2656
/* Process a receive checksum status element (old-style LE format).
 * Records the hardware checksum on the current ring skb, or disables
 * the offload entirely when the hardware shows signs of misbehaving.
 */
static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
{
	/* If this happens then driver assuming wrong format for chip type */
	BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);

	/* Both checksum counters are programmed to start at
	 * the same offset, so unless there is a problem they
	 * should match. This failure is an early indication that
	 * hardware receive checksumming won't work.
	 */
	if (likely((u16)(status >> 16) == (u16)status)) {
		struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = le16_to_cpu(status);
	} else {
		dev_notice(&sky2->hw->pdev->dev,
			   "%s: receive checksum problem (status = %#x)\n",
			   sky2->netdev->name, status);

		/* Disable checksum offload
		 * It will be reenabled on next ndo_set_features, but if it's
		 * really broken, will get disabled again
		 */
		sky2->netdev->features &= ~NETIF_F_RXCSUM;
		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
			     BMU_DIS_RX_CHKSUM);
	}
}
2685
2686static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
2687{
2688	struct sk_buff *skb;
2689
2690	skb = sky2->rx_ring[sky2->rx_next].skb;
2691	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
2692}
2693
2694static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
2695{
2696	struct sk_buff *skb;
2697
2698	skb = sky2->rx_ring[sky2->rx_next].skb;
2699	skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3);
2700}
2701
/* Process status response ring.
 *
 * Walks hardware-owned status list elements up to "idx" (the hardware
 * put pointer), dispatching receive, VLAN tag, checksum, RSS hash and
 * transmit-completion reports.  Stops early once "to_do" packets have
 * been delivered.  Returns the number of packets passed up the stack.
 */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
	int work_done = 0;
	unsigned int total_bytes[2] = { 0 };
	unsigned int total_packets[2] = { 0 };

	if (to_do <= 0)
		return work_done;

	/* order LE reads after the caller's read of the put index */
	rmb();
	do {
		struct sky2_port *sky2;
		struct sky2_status_le *le  = hw->st_le + hw->st_idx;
		unsigned port;
		struct net_device *dev;
		struct sk_buff *skb;
		u32 status;
		u16 length;
		u8 opcode = le->opcode;

		if (!(opcode & HW_OWNER))
			break;

		hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);

		/* css link bit selects which of the two ports */
		port = le->css & CSS_LINK_BIT;
		dev = hw->dev[port];
		sky2 = netdev_priv(dev);
		length = le16_to_cpu(le->length);
		status = le32_to_cpu(le->status);

		le->opcode = 0;		/* mark element consumed */
		switch (opcode & ~HW_OWNER) {
		case OP_RXSTAT:
			total_packets[port]++;
			total_bytes[port] += length;

			skb = sky2_receive(dev, length, status);
			if (!skb)
				break;

			/* This chip reports checksum status differently */
			if (hw->flags & SKY2_HW_NEW_LE) {
				if ((dev->features & NETIF_F_RXCSUM) &&
				    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
				    (le->css & CSS_TCPUDPCSOK))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;
			}

			skb->protocol = eth_type_trans(skb, dev);
			sky2_skb_rx(sky2, skb);

			/* Stop after net poll weight */
			if (++work_done >= to_do)
				goto exit_loop;
			break;

		case OP_RXVLAN:
			sky2_rx_tag(sky2, length);
			break;

		case OP_RXCHKSVLAN:
			sky2_rx_tag(sky2, length);
			fallthrough;
		case OP_RXCHKS:
			if (likely(dev->features & NETIF_F_RXCSUM))
				sky2_rx_checksum(sky2, status);
			break;

		case OP_RSS_HASH:
			sky2_rx_hash(sky2, status);
			break;

		case OP_TXINDEXLE:
			/* TX index reports status for both ports */
			sky2_tx_done(hw->dev[0], status & 0xfff);
			if (hw->dev[1])
				sky2_tx_done(hw->dev[1],
				     ((status >> 24) & 0xff)
					     | (u16)(length & 0xf) << 8);
			break;

		default:
			if (net_ratelimit())
				pr_warn("unknown status opcode 0x%x\n", opcode);
		}
	} while (hw->st_idx != idx);

	/* Fully processed status ring so clear irq */
	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);

exit_loop:
	sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
	sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);

	return work_done;
}
2802
/* Handle per-port hardware error sources (RAM read/write parity, MAC
 * parity, Rx parity and TCP segmentation errors), logging and
 * clearing each reported condition.
 */
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

	if (net_ratelimit())
		netdev_info(dev, "hw error interrupt status 0x%x\n", status);

	if (status & Y2_IS_PAR_RD1) {
		if (net_ratelimit())
			netdev_err(dev, "ram data read parity error\n");
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
		if (net_ratelimit())
			netdev_err(dev, "ram data write parity error\n");

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
		if (net_ratelimit())
			netdev_err(dev, "MAC parity error\n");
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
		if (net_ratelimit())
			netdev_err(dev, "RX parity error\n");
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
		if (net_ratelimit())
			netdev_err(dev, "TCP segmentation error\n");
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
2842
/* Top-level hardware error interrupt: timestamp timer overflow,
 * PCI/PCI-Express errors, and the per-port error summary bits.
 */
static void sky2_hw_intr(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u32 status = sky2_read32(hw, B0_HWE_ISRC);
	u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);

	/* only act on sources that are actually enabled */
	status &= hwmsk;

	if (status & Y2_IS_TIST_OV)
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
		u16 pci_err;

		/* config space writes are gated by TST_CFG_WRITE_ON/OFF */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_err = sky2_pci_read16(hw, PCI_STATUS);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
			        pci_err);

		sky2_pci_write16(hw, PCI_STATUS,
				      pci_err | PCI_STATUS_ERROR_BITS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
		/* PCI-Express uncorrectable Error occurred */
		u32 err;

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);
		if (net_ratelimit())
			dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);

		sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* per-port error bits: port 0 in the low byte, port 1 one byte up */
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}
2889
/* Per-port MAC interrupt: acknowledge counter-overflow events and
 * account Rx/Tx FIFO errors.
 */
static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);

	/* reads below presumably clear the overflow condition — verify
	 * against chip documentation
	 */
	if (status & GM_IS_RX_CO_OV)
		gma_read16(hw, port, GM_RX_IRQ_SRC);

	if (status & GM_IS_TX_CO_OV)
		gma_read16(hw, port, GM_TX_IRQ_SRC);

	if (status & GM_IS_RX_FF_OR) {
		++dev->stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++dev->stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}
2914
/* Descriptor check error — this should never happen; it indicates a
 * driver bug.  Logs the prefetch unit indices and clears the IRQ.
 */
static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
{
	struct net_device *dev = hw->dev[port];
	u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));

	dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
		dev->name, (unsigned) q, (unsigned) idx,
		(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));

	/* clear the check interrupt so it does not refire */
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
}
2927
/* Detect a stuck receiver.  Returns 1 when the port has been idle
 * since the last check while the MAC or PCI FIFO read pointer has not
 * moved and its fill level has not dropped; otherwise snapshots the
 * current state for the next check and returns 0.
 */
static int sky2_rx_hung(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned rxq = rxqaddr[port];
	u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
	u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));

	/* If idle and MAC or PCI is stuck */
	if (sky2->check.last == sky2->last_rx &&
	    ((mac_rp == sky2->check.mac_rp &&
	      mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
	     /* Check if the PCI RX hang */
	     (fifo_rp == sky2->check.fifo_rp &&
	      fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
		netdev_printk(KERN_DEBUG, dev,
			      "hung mac %d:%d fifo %d (%d:%d)\n",
			      mac_lev, mac_rp, fifo_lev,
			      fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
		return 1;
	} else {
		/* remember current state for the next watchdog pass */
		sky2->check.last = sky2->last_rx;
		sky2->check.mac_rp = mac_rp;
		sky2->check.mac_lev = mac_lev;
		sky2->check.fifo_rp = fifo_rp;
		sky2->check.fifo_lev = fifo_lev;
		return 0;
	}
}
2960
/* Per-second watchdog timer: recovers from a lost interrupt by
 * scheduling NAPI, checks RAM-buffer chips for a hung receiver, and
 * re-arms itself while at least one port is up.
 */
static void sky2_watchdog(struct timer_list *t)
{
	struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);

	/* Check for lost IRQ once a second */
	if (sky2_read32(hw, B0_ISRC)) {
		napi_schedule(&hw->napi);
	} else {
		int i, active = 0;

		for (i = 0; i < hw->ports; i++) {
			struct net_device *dev = hw->dev[i];
			if (!netif_running(dev))
				continue;
			++active;

			/* For chips with Rx FIFO, check if stuck */
			if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
			     sky2_rx_hung(dev)) {
				netdev_info(dev, "receiver hang detected\n");
				schedule_work(&hw->restart_work);
				return;
			}
		}

		/* all ports down: let the timer lapse until next link up */
		if (active == 0)
			return;
	}

	mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
}
2992
/* Hardware/software error handling: dispatch each asserted error
 * source (hardware summary, MAC, descriptor check) to its handler.
 */
static void sky2_err_intr(struct sky2_hw *hw, u32 status)
{
	if (net_ratelimit())
		dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);

	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);

	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);

	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);

	if (status & Y2_IS_CHK_RX1)
		sky2_le_error(hw, 0, Q_R1);

	if (status & Y2_IS_CHK_RX2)
		sky2_le_error(hw, 1, Q_R2);

	if (status & Y2_IS_CHK_TXA1)
		sky2_le_error(hw, 0, Q_XA1);

	if (status & Y2_IS_CHK_TXA2)
		sky2_le_error(hw, 1, Q_XA2);
}
3020
/* NAPI poll handler.
 *
 * Services error, PHY and quick-link events, then drains the status
 * ring until caught up with the hardware put index or the budget is
 * spent.  The read of B0_Y2_SP_LISR after napi_complete_done()
 * presumably re-arms the interrupt (pairing with the masking read of
 * B0_Y2_SP_ISRC2 in sky2_intr()) — verify against chip docs.
 */
static int sky2_poll(struct napi_struct *napi, int work_limit)
{
	struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
	int work_done = 0;
	u16 idx;

	if (unlikely(status & Y2_IS_ERROR))
		sky2_err_intr(hw, status);

	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);

	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);

	if (status & Y2_IS_PHY_QLNK)
		sky2_qlink_intr(hw);

	/* re-read the put index each pass: hardware may keep producing */
	while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
		work_done += sky2_status_intr(hw, work_limit - work_done, idx);

		if (work_done >= work_limit)
			goto done;
	}

	napi_complete_done(napi, work_done);
	sky2_read32(hw, B0_Y2_SP_LISR);
done:

	return work_done;
}
3053
/* Hardware interrupt handler.  Reading B0_Y2_SP_ISRC2 masks further
 * chip interrupts as a side effect; real work is deferred to NAPI.
 */
static irqreturn_t sky2_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	u32 status;

	/* Reading this mask interrupts as side effect */
	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	if (status == 0 || status == ~0) {
		/* not our interrupt — ~0 typically means the device is
		 * gone (surprise removal); verify
		 */
		sky2_write32(hw, B0_Y2_SP_ICR, 2);
		return IRQ_NONE;
	}

	prefetch(&hw->st_le[hw->st_idx]);

	napi_schedule(&hw->napi);

	return IRQ_HANDLED;
}
3072
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: schedule NAPI so pending events are processed
 * without relying on a hardware interrupt.
 */
static void sky2_netpoll(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	napi_schedule(&sky2->hw->napi);
}
#endif
3081
3082/* Chip internal frequency for clock calculations */
3083static u32 sky2_mhz(const struct sky2_hw *hw)
3084{
3085	switch (hw->chip_id) {
3086	case CHIP_ID_YUKON_EC:
3087	case CHIP_ID_YUKON_EC_U:
3088	case CHIP_ID_YUKON_EX:
3089	case CHIP_ID_YUKON_SUPR:
3090	case CHIP_ID_YUKON_UL_2:
3091	case CHIP_ID_YUKON_OPT:
3092	case CHIP_ID_YUKON_PRM:
3093	case CHIP_ID_YUKON_OP_2:
3094		return 125;
3095
3096	case CHIP_ID_YUKON_FE:
3097		return 100;
3098
3099	case CHIP_ID_YUKON_FE_P:
3100		return 50;
3101
3102	case CHIP_ID_YUKON_XL:
3103		return 156;
3104
3105	default:
3106		BUG();
3107	}
3108}
3109
3110static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
3111{
3112	return sky2_mhz(hw) * us;
3113}
3114
3115static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
3116{
3117	return clk / sky2_mhz(hw);
3118}
3119
3120
/* One-time chip identification and capability setup.
 *
 * Reads the chip id/revision, fills hw->flags with the feature set of
 * the detected Yukon variant, and counts usable ports.
 * Returns 0 on success or -EOPNOTSUPP for unknown/unsupported silicon.
 */
static int sky2_init(struct sky2_hw *hw)
{
	u8 t8;

	/* Enable all clocks and check for bad PCI access */
	sky2_pci_write32(hw, PCI_DEV_REG3, 0);

	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;

	/* Per-variant feature flags. */
	switch (hw->chip_id) {
	case CHIP_ID_YUKON_XL:
		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
		if (hw->chip_rev < CHIP_REV_YU_XL_A2)
			hw->flags |= SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_EC_U:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_EX:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_ADV_POWER_CTL
			| SKY2_HW_RSS_CHKSUM;

		/* New transmit checksum */
		if (hw->chip_rev != CHIP_REV_YU_EX_B0)
			hw->flags |= SKY2_HW_AUTO_TX_SUM;
		break;

	case CHIP_ID_YUKON_EC:
		/* This rev is really old, and requires untested workarounds */
		if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
			return -EOPNOTSUPP;
		}
		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_FE:
		hw->flags = SKY2_HW_RSS_BROKEN;
		break;

	case CHIP_ID_YUKON_FE_P:
		hw->flags = SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;

		/* The workaround for status conflicts VLAN tag detection. */
		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
			hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
		break;

	case CHIP_ID_YUKON_SUPR:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEWER_PHY
			| SKY2_HW_NEW_LE
			| SKY2_HW_AUTO_TX_SUM
			| SKY2_HW_ADV_POWER_CTL;

		if (hw->chip_rev == CHIP_REV_YU_SU_A0)
			hw->flags |= SKY2_HW_RSS_CHKSUM;
		break;

	case CHIP_ID_YUKON_UL_2:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_ADV_POWER_CTL;
		break;

	case CHIP_ID_YUKON_OPT:
	case CHIP_ID_YUKON_PRM:
	case CHIP_ID_YUKON_OP_2:
		hw->flags = SKY2_HW_GIGABIT
			| SKY2_HW_NEW_LE
			| SKY2_HW_ADV_POWER_CTL;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	/* PMD type letters 'L'/'S'/'P' indicate a fibre PHY. */
	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
	if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
		hw->flags |= SKY2_HW_FIBRE_PHY;

	/* Detect a second port: dual-MAC config with link 2 not inactive. */
	hw->ports = 1;
	t8 = sky2_read8(hw, B2_Y2_HW_RES);
	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			++hw->ports;
	}

	/* Non-zero B2_E_0 means a RAM buffer is present. */
	if (sky2_read8(hw, B2_E_0))
		hw->flags |= SKY2_HW_RAM_BUFFER;

	return 0;
}
3228
/* Full hardware reset and re-initialization of the chip-global state:
 * disables ASF, performs the software reset, clears PCI/PCIe errors,
 * applies per-chip PHY/PEX workarounds, and restarts the status unit.
 * Per-port (MAC/PHY) bring-up is done separately in sky2_hw_up().
 */
static void sky2_reset(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 status;
	int i;
	u32 hwe_mask = Y2_HWE_ALL_MASK;

	/* disable ASF */
	if (hw->chip_id == CHIP_ID_YUKON_EX
	    || hw->chip_id == CHIP_ID_YUKON_SUPR) {
		sky2_write32(hw, CPU_WDOG, 0);
		status = sky2_read16(hw, HCU_CCSR);
		status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
			    HCU_CCSR_UC_STATE_MSK);
		/*
		 * CPU clock divider shouldn't be used because
		 * - ASF firmware may malfunction
		 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
		 */
		status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
		sky2_write16(hw, HCU_CCSR, status);
		sky2_write32(hw, CPU_WDOG, 0);
	} else
		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
	sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);

	/* do a SW reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	/* allow writes to PCI config */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* clear PCI errors, if any */
	status = sky2_pci_read16(hw, PCI_STATUS);
	status |= PCI_STATUS_ERROR_BITS;
	sky2_pci_write16(hw, PCI_STATUS, status);

	sky2_write8(hw, B0_CTST, CS_MRST_CLR);

	if (pci_is_pcie(pdev)) {
		/* Clear all pending uncorrectable AER error bits. */
		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
			     0xfffffffful);

		/* If error bit is stuck on ignore it */
		if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
			dev_info(&pdev->dev, "ignoring stuck error report bit\n");
		else
			hwe_mask |= Y2_IS_PCI_EXP;
	}

	sky2_power_on(hw);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* Pulse GMAC link reset on every port. */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);

		if (hw->chip_id == CHIP_ID_YUKON_EX ||
		    hw->chip_id == CHIP_ID_YUKON_SUPR)
			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
				     | GMC_BYP_RETR_ON);

	}

	if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
		/* enable MACSec clock gating */
		sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
	}

	/* PEX PHY workarounds for Optima-family chips. */
	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
	    hw->chip_id == CHIP_ID_YUKON_PRM ||
	    hw->chip_id == CHIP_ID_YUKON_OP_2) {
		u16 reg;

		if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
			/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
			sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));

			/* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
			reg = 10;

			/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
			sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
		} else {
			/* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
			reg = 3;
		}

		reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
		reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;

		/* reset PHY Link Detect */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);

		/* check if PSMv2 was running before */
		reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
		if (reg & PCI_EXP_LNKCTL_ASPMC)
			/* restore the PCIe Link Control register */
			sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
					 reg);

		if (hw->chip_id == CHIP_ID_YUKON_PRM &&
			hw->chip_rev == CHIP_REV_YU_PRM_A0) {
			/* change PHY Interrupt polarity to low active */
			reg = sky2_read16(hw, GPHY_CTRL);
			sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);

			/* adapt HW for low active PHY Interrupt */
			reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
			sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
		}

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
		sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
	}

	/* Clear I2C IRQ noise */
	sky2_write32(hw, B2_I2C_IRQ, 1);

	/* turn off hardware timer (unused) */
	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling */
	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);

	/* Turn off receive timestamp */
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
	}

	sky2_write32(hw, B0_HWE_IMSK, hwe_mask);

	for (i = 0; i < hw->ports; i++)
		sky2_gmac_reset(hw, i);

	/* Reset the status unit and point it at the (cleared) status ring. */
	memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
	hw->st_idx = 0;

	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);

	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);

	/* Set the list last index */
	sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);

	sky2_write16(hw, STAT_TX_IDX_TH, 10);
	sky2_write8(hw, STAT_FIFO_WM, 16);

	/* set Status-FIFO ISR watermark */
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
		sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
	else
		sky2_write8(hw, STAT_FIFO_ISR_WM, 16);

	sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
	sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
	sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));

	/* enable status unit */
	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);

	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
}
3423
/* Take device down (offline).
 * Equivalent to doing dev_stop() but this does not
 * inform upper layers of the transition.
 */
static void sky2_detach(struct net_device *dev)
{
	if (!netif_running(dev))
		return;

	/* Mark detached under the tx lock so the queue stops cleanly. */
	netif_tx_lock(dev);
	netif_device_detach(dev);	/* stop txq */
	netif_tx_unlock(dev);

	sky2_close(dev);
}
3437
/* Bring device back after doing sky2_detach.
 * Returns 0 on success (or if the device was not running); on failure
 * the open error code is returned and the device is closed.
 */
static int sky2_reattach(struct net_device *dev)
{
	int err;

	if (!netif_running(dev))
		return 0;

	err = sky2_open(dev);
	if (err) {
		netdev_info(dev, "could not restart %d\n", err);
		dev_close(dev);
		return err;
	}

	netif_device_attach(dev);
	sky2_set_multicast(dev);
	return 0;
}
3456
/* Quiesce the whole chip: mask interrupts, wait out any in-flight
 * handler, stop NAPI, then take each running port's queue and
 * hardware down.  Inverse of sky2_all_up().
 */
static void sky2_all_down(struct sky2_hw *hw)
{
	int i;

	if (hw->flags & SKY2_HW_IRQ_SETUP) {
		sky2_write32(hw, B0_IMSK, 0);	/* mask all sources */
		sky2_read32(hw, B0_IMSK);	/* read back to flush the write */

		synchronize_irq(hw->pdev->irq);
		napi_disable(&hw->napi);
	}

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (!netif_running(dev))
			continue;

		netif_carrier_off(dev);
		netif_tx_disable(dev);
		sky2_hw_down(sky2);
	}
}
3481
/* Bring every running port back up, then unmask the accumulated
 * interrupt sources and re-enable NAPI.  Inverse of sky2_all_down().
 */
static void sky2_all_up(struct sky2_hw *hw)
{
	u32 imask = Y2_IS_BASE;
	int i;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (!netif_running(dev))
			continue;

		sky2_hw_up(sky2);
		sky2_set_multicast(dev);
		imask |= portirq_msk[i];	/* collect per-port IRQ bits */
		netif_wake_queue(dev);
	}

	if (hw->flags & SKY2_HW_IRQ_SETUP) {
		sky2_write32(hw, B0_IMSK, imask);
		sky2_read32(hw, B0_IMSK);	/* read back to flush the write */
		sky2_read32(hw, B0_Y2_SP_LISR);
		napi_enable(&hw->napi);
	}
}
3507
/* Deferred-work handler: quiesce all ports, reset the chip, and bring
 * the ports back up — the whole sequence serialized under RTNL. */
static void sky2_restart(struct work_struct *work)
{
	struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);

	rtnl_lock();

	sky2_all_down(hw);
	sky2_reset(hw);
	sky2_all_up(hw);

	rtnl_unlock();
}
3520
3521static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3522{
3523	return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3524}
3525
3526static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3527{
3528	const struct sky2_port *sky2 = netdev_priv(dev);
3529
3530	wol->supported = sky2_wol_supported(sky2->hw);
3531	wol->wolopts = sky2->wol;
3532}
3533
3534static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3535{
3536	struct sky2_port *sky2 = netdev_priv(dev);
3537	struct sky2_hw *hw = sky2->hw;
3538	bool enable_wakeup = false;
3539	int i;
3540
3541	if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
3542	    !device_can_wakeup(&hw->pdev->dev))
3543		return -EOPNOTSUPP;
3544
3545	sky2->wol = wol->wolopts;
3546
3547	for (i = 0; i < hw->ports; i++) {
3548		struct net_device *dev = hw->dev[i];
3549		struct sky2_port *sky2 = netdev_priv(dev);
3550
3551		if (sky2->wol)
3552			enable_wakeup = true;
3553	}
3554	device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
3555
3556	return 0;
3557}
3558
3559static u32 sky2_supported_modes(const struct sky2_hw *hw)
3560{
3561	if (sky2_is_copper(hw)) {
3562		u32 modes = SUPPORTED_10baseT_Half
3563			| SUPPORTED_10baseT_Full
3564			| SUPPORTED_100baseT_Half
3565			| SUPPORTED_100baseT_Full;
3566
3567		if (hw->flags & SKY2_HW_GIGABIT)
3568			modes |= SUPPORTED_1000baseT_Half
3569				| SUPPORTED_1000baseT_Full;
3570		return modes;
3571	} else
3572		return SUPPORTED_1000baseT_Half
3573			| SUPPORTED_1000baseT_Full;
3574}
3575
/* ethtool get_link_ksettings: report port type, speed/duplex and the
 * advertised/supported mode masks.  Always returns 0. */
static int sky2_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u32 supported, advertising;

	supported = sky2_supported_modes(hw);
	cmd->base.phy_address = PHY_ADDR_MARV;
	if (sky2_is_copper(hw)) {
		cmd->base.port = PORT_TP;
		cmd->base.speed = sky2->speed;
		supported |=  SUPPORTED_Autoneg | SUPPORTED_TP;
	} else {
		/* fibre runs at a fixed 1000 Mb/s */
		cmd->base.speed = SPEED_1000;
		cmd->base.port = PORT_FIBRE;
		supported |=  SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	}

	advertising = sky2->advertising;
	cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
		? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.duplex = sky2->duplex;

	/* Translate the legacy u32 masks into link-mode bitmaps. */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
3607
/* ethtool set_link_ksettings: accept either autonegotiation with a
 * subset of the supported modes, or a fixed speed/duplex pair that the
 * hardware supports.  Returns -EINVAL on any unsupported combination.
 * Reinitializes the PHY if the interface is up.
 */
static int sky2_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	const struct sky2_hw *hw = sky2->hw;
	u32 supported = sky2_supported_modes(hw);
	u32 new_advertising;

	ethtool_convert_link_mode_to_legacy_u32(&new_advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Advertised modes must be a subset of what we support. */
		if (new_advertising & ~supported)
			return -EINVAL;

		if (sky2_is_copper(hw))
			sky2->advertising = new_advertising |
					    ADVERTISED_TP |
					    ADVERTISED_Autoneg;
		else
			sky2->advertising = new_advertising |
					    ADVERTISED_FIBRE |
					    ADVERTISED_Autoneg;

		sky2->flags |= SKY2_FLAG_AUTO_SPEED;
		/* -1 marks speed/duplex as "determined by autoneg" */
		sky2->duplex = -1;
		sky2->speed = -1;
	} else {
		u32 setting;
		u32 speed = cmd->base.speed;

		/* Map the requested fixed speed/duplex onto the matching
		 * SUPPORTED_* bit so it can be validated below. */
		switch (speed) {
		case SPEED_1000:
			if (cmd->base.duplex == DUPLEX_FULL)
				setting = SUPPORTED_1000baseT_Full;
			else if (cmd->base.duplex == DUPLEX_HALF)
				setting = SUPPORTED_1000baseT_Half;
			else
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->base.duplex == DUPLEX_FULL)
				setting = SUPPORTED_100baseT_Full;
			else if (cmd->base.duplex == DUPLEX_HALF)
				setting = SUPPORTED_100baseT_Half;
			else
				return -EINVAL;
			break;

		case SPEED_10:
			if (cmd->base.duplex == DUPLEX_FULL)
				setting = SUPPORTED_10baseT_Full;
			else if (cmd->base.duplex == DUPLEX_HALF)
				setting = SUPPORTED_10baseT_Half;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if ((setting & supported) == 0)
			return -EINVAL;

		sky2->speed = speed;
		sky2->duplex = cmd->base.duplex;
		sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
	}

	if (netif_running(dev)) {
		sky2_phy_reinit(sky2);
		sky2_set_multicast(dev);
	}

	return 0;
}
3684
3685static void sky2_get_drvinfo(struct net_device *dev,
3686			     struct ethtool_drvinfo *info)
3687{
3688	struct sky2_port *sky2 = netdev_priv(dev);
3689
3690	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
3691	strscpy(info->version, DRV_VERSION, sizeof(info->version));
3692	strscpy(info->bus_info, pci_name(sky2->hw->pdev),
3693		sizeof(info->bus_info));
3694}
3695
/* ethtool statistics table: name shown to userspace and the GMAC MIB
 * register offset it is read from.  Order matters: sky2_phy_stats()
 * treats the first two entries as 64-bit octet counters and the rest
 * as 32-bit counters. */
static const struct sky2_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} sky2_stats[] = {
	{ "tx_bytes",	   GM_TXO_OK_HI },
	{ "rx_bytes",	   GM_RXO_OK_HI },
	{ "tx_broadcast",  GM_TXF_BC_OK },
	{ "rx_broadcast",  GM_RXF_BC_OK },
	{ "tx_multicast",  GM_TXF_MC_OK },
	{ "rx_multicast",  GM_RXF_MC_OK },
	{ "tx_unicast",    GM_TXF_UC_OK },
	{ "rx_unicast",    GM_RXF_UC_OK },
	{ "tx_mac_pause",  GM_TXF_MPAUSE },
	{ "rx_mac_pause",  GM_RXF_MPAUSE },
	{ "collisions",    GM_TXF_COL },
	{ "late_collision",GM_TXF_LAT_COL },
	{ "aborted", 	   GM_TXF_ABO_COL },
	{ "single_collisions", GM_TXF_SNG_COL },
	{ "multi_collisions", GM_TXF_MUL_COL },

	{ "rx_short",      GM_RXF_SHT },
	{ "rx_runt", 	   GM_RXE_FRAG },
	{ "rx_64_byte_packets", GM_RXF_64B },
	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
	{ "rx_128_to_255_byte_packets", GM_RXF_255B },
	{ "rx_256_to_511_byte_packets", GM_RXF_511B },
	{ "rx_512_to_1023_byte_packets", GM_RXF_1023B },
	{ "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
	{ "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
	{ "rx_too_long",   GM_RXF_LNG_ERR },
	{ "rx_fifo_overflow", GM_RXE_FIFO_OV },
	{ "rx_jabber",     GM_RXF_JAB_PKT },
	{ "rx_fcs_error",   GM_RXF_FCS_ERR },

	{ "tx_64_byte_packets", GM_TXF_64B },
	{ "tx_65_to_127_byte_packets", GM_TXF_127B },
	{ "tx_128_to_255_byte_packets", GM_TXF_255B },
	{ "tx_256_to_511_byte_packets", GM_TXF_511B },
	{ "tx_512_to_1023_byte_packets", GM_TXF_1023B },
	{ "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
	{ "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
	{ "tx_fifo_underrun", GM_TXE_FIFO_UR },
};
3739
3740static u32 sky2_get_msglevel(struct net_device *netdev)
3741{
3742	struct sky2_port *sky2 = netdev_priv(netdev);
3743	return sky2->msg_enable;
3744}
3745
3746static int sky2_nway_reset(struct net_device *dev)
3747{
3748	struct sky2_port *sky2 = netdev_priv(dev);
3749
3750	if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
3751		return -EINVAL;
3752
3753	sky2_phy_reinit(sky2);
3754	sky2_set_multicast(dev);
3755
3756	return 0;
3757}
3758
3759static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
3760{
3761	struct sky2_hw *hw = sky2->hw;
3762	unsigned port = sky2->port;
3763	int i;
3764
3765	data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
3766	data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
3767
3768	for (i = 2; i < count; i++)
3769		data[i] = get_stats32(hw, port, sky2_stats[i].offset);
3770}
3771
3772static void sky2_set_msglevel(struct net_device *netdev, u32 value)
3773{
3774	struct sky2_port *sky2 = netdev_priv(netdev);
3775	sky2->msg_enable = value;
3776}
3777
3778static int sky2_get_sset_count(struct net_device *dev, int sset)
3779{
3780	switch (sset) {
3781	case ETH_SS_STATS:
3782		return ARRAY_SIZE(sky2_stats);
3783	default:
3784		return -EOPNOTSUPP;
3785	}
3786}
3787
3788static void sky2_get_ethtool_stats(struct net_device *dev,
3789				   struct ethtool_stats *stats, u64 * data)
3790{
3791	struct sky2_port *sky2 = netdev_priv(dev);
3792
3793	sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
3794}
3795
3796static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
3797{
3798	int i;
3799
3800	switch (stringset) {
3801	case ETH_SS_STATS:
3802		for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
3803			memcpy(data + i * ETH_GSTRING_LEN,
3804			       sky2_stats[i].name, ETH_GSTRING_LEN);
3805		break;
3806	}
3807}
3808
/* ndo_set_mac_address: validate and install a new station address in
 * both chip MAC address registers and the GMAC source-address pairs. */
static int sky2_set_mac_address(struct net_device *dev, void *p)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	const struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);
	/* Each port has two MAC address register sets, 8 bytes apart. */
	memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
		    dev->dev_addr, ETH_ALEN);
	memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
		    dev->dev_addr, ETH_ALEN);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);

	return 0;
}
3833
3834static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3835{
3836	u32 bit;
3837
3838	bit = ether_crc(ETH_ALEN, addr) & 63;
3839	filter[bit >> 3] |= 1 << (bit & 7);
3840}
3841
/* ndo_set_rx_mode: program unicast/multicast receive filtering.
 * Handles promiscuous and all-multicast modes, and — when receive flow
 * control is active — always admits the 802.3x pause multicast address
 * via the hash filter.
 */
static void sky2_set_multicast(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	struct netdev_hw_addr *ha;
	u16 reg;
	u8 filter[8];
	int rx_pause;
	/* 01:80:c2:00:00:01 — the MAC control (pause) group address */
	static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };

	rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)	/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));	/* accept all groups */
	else if (netdev_mc_empty(dev) && !rx_pause)
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		reg |= GM_RXCR_MCF_ENA;

		if (rx_pause)
			sky2_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(ha, dev)
			sky2_add_filter(filter, ha->addr);
	}

	/* Load the 64-bit hash filter, 16 bits per register. */
	gma_write16(hw, port, GM_MC_ADDR_H1,
		    (u16) filter[0] | ((u16) filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
		    (u16) filter[2] | ((u16) filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
		    (u16) filter[4] | ((u16) filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
		    (u16) filter[6] | ((u16) filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
3886
/* ndo_get_stats64: fill 'stats' from the driver's u64 software
 * byte/packet counters (read under their seqcount to get a consistent
 * snapshot) plus assorted hardware MIB counters. */
static void sky2_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *stats)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned int start;
	u64 _bytes, _packets;

	/* Retry loop: re-read if a writer updated the rx counters. */
	do {
		start = u64_stats_fetch_begin(&sky2->rx_stats.syncp);
		_bytes = sky2->rx_stats.bytes;
		_packets = sky2->rx_stats.packets;
	} while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start));

	stats->rx_packets = _packets;
	stats->rx_bytes = _bytes;

	do {
		start = u64_stats_fetch_begin(&sky2->tx_stats.syncp);
		_bytes = sky2->tx_stats.bytes;
		_packets = sky2->tx_stats.packets;
	} while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start));

	stats->tx_packets = _packets;
	stats->tx_bytes = _bytes;

	stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
		+ get_stats32(hw, port, GM_RXF_BC_OK);

	stats->collisions = get_stats32(hw, port, GM_TXF_COL);

	stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
	stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
	stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
		+ get_stats32(hw, port, GM_RXE_FRAG);
	stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);

	/* These are maintained in the netdev's own counters. */
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
	stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
}
3929
/* Can have one global because blinking is controlled by
 * ethtool and that is always under RTNL mutex
 */
/* Drive the port LEDs into the requested mode via the Marvell PHY.
 * Newer chips (EC-U/EX/SUPR) use the per-LED control register on PHY
 * page 3; older ones use the LED override register. */
static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;

	spin_lock_bh(&sky2->phy_lock);
	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
	    hw->chip_id == CHIP_ID_YUKON_EX ||
	    hw->chip_id == CHIP_ID_YUKON_SUPR) {
		u16 pg;
		/* Save the current PHY page; restore it when done. */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		switch (mode) {
		case MO_LED_OFF:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(8) |
				     PHY_M_LEDC_INIT_CTRL(8) |
				     PHY_M_LEDC_STA1_CTRL(8) |
				     PHY_M_LEDC_STA0_CTRL(8));
			break;
		case MO_LED_ON:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(9) |
				     PHY_M_LEDC_INIT_CTRL(9) |
				     PHY_M_LEDC_STA1_CTRL(9) |
				     PHY_M_LEDC_STA0_CTRL(9));
			break;
		case MO_LED_BLINK:
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(0xa) |
				     PHY_M_LEDC_INIT_CTRL(0xa) |
				     PHY_M_LEDC_STA1_CTRL(0xa) |
				     PHY_M_LEDC_STA0_CTRL(0xa));
			break;
		case MO_LED_NORM:
			/* last case: deliberately falls out of the switch */
			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
				     PHY_M_LEDC_LOS_CTRL(1) |
				     PHY_M_LEDC_INIT_CTRL(8) |
				     PHY_M_LEDC_STA1_CTRL(7) |
				     PHY_M_LEDC_STA0_CTRL(7));
		}

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else
		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(mode) |
				     PHY_M_LED_MO_10(mode) |
				     PHY_M_LED_MO_100(mode) |
				     PHY_M_LED_MO_1000(mode) |
				     PHY_M_LED_MO_RX(mode) |
				     PHY_M_LED_MO_TX(mode));

	spin_unlock_bh(&sky2->phy_lock);
}
3988
3989/* blink LED's for finding board */
3990static int sky2_set_phys_id(struct net_device *dev,
3991			    enum ethtool_phys_id_state state)
3992{
3993	struct sky2_port *sky2 = netdev_priv(dev);
3994
3995	switch (state) {
3996	case ETHTOOL_ID_ACTIVE:
3997		return 1;	/* cycle on/off once per second */
3998	case ETHTOOL_ID_INACTIVE:
3999		sky2_led(sky2, MO_LED_NORM);
4000		break;
4001	case ETHTOOL_ID_ON:
4002		sky2_led(sky2, MO_LED_ON);
4003		break;
4004	case ETHTOOL_ID_OFF:
4005		sky2_led(sky2, MO_LED_OFF);
4006		break;
4007	}
4008
4009	return 0;
4010}
4011
4012static void sky2_get_pauseparam(struct net_device *dev,
4013				struct ethtool_pauseparam *ecmd)
4014{
4015	struct sky2_port *sky2 = netdev_priv(dev);
4016
4017	switch (sky2->flow_mode) {
4018	case FC_NONE:
4019		ecmd->tx_pause = ecmd->rx_pause = 0;
4020		break;
4021	case FC_TX:
4022		ecmd->tx_pause = 1, ecmd->rx_pause = 0;
4023		break;
4024	case FC_RX:
4025		ecmd->tx_pause = 0, ecmd->rx_pause = 1;
4026		break;
4027	case FC_BOTH:
4028		ecmd->tx_pause = ecmd->rx_pause = 1;
4029	}
4030
4031	ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
4032		? AUTONEG_ENABLE : AUTONEG_DISABLE;
4033}
4034
/* ethtool set_pauseparam: store the requested flow-control mode and
 * reinitialize the PHY if the interface is up.  Always returns 0. */
static int sky2_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	if (ecmd->autoneg == AUTONEG_ENABLE)
		sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
	else
		sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;

	sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);

	if (netif_running(dev))
		sky2_phy_reinit(sky2);

	return 0;
}
4052
/* ethtool get_coalesce: read the three status-unit timers (tx, level,
 * isr) and FIFO watermarks back from the hardware.  A stopped timer is
 * reported as 0 usecs. */
static int sky2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
		ecmd->tx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);

	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);

	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs_irq = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
	}

	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);

	return 0;
}
4088
/* Note: this affect both ports */
/* ethtool set_coalesce: validate and program the status-unit timers
 * and FIFO watermarks.  A 0-usec setting stops the matching timer. */
static int sky2_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	/* Max interval expressible in the 24-bit timer init register. */
	const u32 tmax = sky2_clk2us(hw, 0x0ffffff);

	if (ecmd->tx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs_irq > tmax)
		return -EINVAL;

	if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
		return -EINVAL;

	if (ecmd->tx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_TX_TIMER_INI,
			     sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	}
	sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_LEV_TIMER_INI,
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs_irq == 0)
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_ISR_TIMER_INI,
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
	return 0;
}
4139
4140/*
4141 * Hardware is limited to min of 128 and max of 2048 for ring size
4142 * and  rounded up to next power of two
4143 * to avoid division in modulus calculation
4144 */
/* Round a requested ring length up to a hardware-usable size: the next
 * power of two above @pending, but never fewer than 128 entries.
 */
static unsigned long roundup_ring_size(unsigned long pending)
{
	unsigned long size = roundup_pow_of_two(pending + 1);

	return max(128ul, size);
}
4149
4150static void sky2_get_ringparam(struct net_device *dev,
4151			       struct ethtool_ringparam *ering,
4152			       struct kernel_ethtool_ringparam *kernel_ering,
4153			       struct netlink_ext_ack *extack)
4154{
4155	struct sky2_port *sky2 = netdev_priv(dev);
4156
4157	ering->rx_max_pending = RX_MAX_PENDING;
4158	ering->tx_max_pending = TX_MAX_PENDING;
4159
4160	ering->rx_pending = sky2->rx_pending;
4161	ering->tx_pending = sky2->tx_pending;
4162}
4163
4164static int sky2_set_ringparam(struct net_device *dev,
4165			      struct ethtool_ringparam *ering,
4166			      struct kernel_ethtool_ringparam *kernel_ering,
4167			      struct netlink_ext_ack *extack)
4168{
4169	struct sky2_port *sky2 = netdev_priv(dev);
4170
4171	if (ering->rx_pending > RX_MAX_PENDING ||
4172	    ering->rx_pending < 8 ||
4173	    ering->tx_pending < TX_MIN_PENDING ||
4174	    ering->tx_pending > TX_MAX_PENDING)
4175		return -EINVAL;
4176
4177	sky2_detach(dev);
4178
4179	sky2->rx_pending = ering->rx_pending;
4180	sky2->tx_pending = ering->tx_pending;
4181	sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
4182
4183	return sky2_reattach(dev);
4184}
4185
/* Size of the register dump produced by sky2_get_regs(): 16 KiB,
 * i.e. 128 blocks of 128 bytes each.
 */
static int sky2_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}
4190
/* Return nonzero if 128-byte register block @b may be read for the
 * ethtool register dump.  Reserved blocks are skipped, and blocks
 * belonging to the second port are only valid on dual port cards.
 */
static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
{
	/* This complicated switch statement makes sure we only
	 * access regions that are unreserved.
	 * Some blocks are only valid on dual port cards.
	 */
	switch (b) {
	/* second port */
	case 5:		/* Tx Arbiter 2 */
	case 9:		/* RX2 */
	case 14 ... 15:	/* TX2 */
	case 17: case 19: /* Ram Buffer 2 */
	case 22 ... 23: /* Tx Ram Buffer 2 */
	case 25:	/* Rx MAC Fifo 2 */
	case 27:	/* Tx MAC Fifo 2 */
	case 31:	/* GPHY 2 */
	case 40 ... 47: /* Pattern Ram 2 */
	case 52: case 54: /* TCP Segmentation 2 */
	case 112 ... 116: /* GMAC 2 */
		return hw->ports > 1;

	case 0:		/* Control */
	case 2:		/* Mac address */
	case 4:		/* Tx Arbiter 1 */
	case 7:		/* PCI express reg */
	case 8:		/* RX1 */
	case 12 ... 13: /* TX1 */
	case 16: case 18:/* Rx Ram Buffer 1 */
	case 20 ... 21: /* Tx Ram Buffer 1 */
	case 24:	/* Rx MAC Fifo 1 */
	case 26:	/* Tx MAC Fifo 1 */
	case 28 ... 29: /* Descriptor and status unit */
	case 30:	/* GPHY 1*/
	case 32 ... 39: /* Pattern Ram 1 */
	case 48: case 50: /* TCP Segmentation 1 */
	case 56 ... 60:	/* PCI space */
	case 80 ... 84:	/* GMAC 1 */
		return 1;

	default:
		return 0;
	}
}
4234
4235/*
4236 * Returns copy of control register region
4237 * Note: ethtool_get_regs always provides full size (16k) buffer
4238 */
4239static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4240			  void *p)
4241{
4242	const struct sky2_port *sky2 = netdev_priv(dev);
4243	const void __iomem *io = sky2->hw->regs;
4244	unsigned int b;
4245
4246	regs->version = 1;
4247
4248	for (b = 0; b < 128; b++) {
4249		/* skip poisonous diagnostic ram region in block 3 */
4250		if (b == 3)
4251			memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
4252		else if (sky2_reg_access_ok(sky2->hw, b))
4253			memcpy_fromio(p, io, 128);
4254		else
4255			memset(p, 0, 128);
4256
4257		p += 128;
4258		io += 128;
4259	}
4260}
4261
4262static int sky2_get_eeprom_len(struct net_device *dev)
4263{
4264	struct sky2_port *sky2 = netdev_priv(dev);
4265	struct sky2_hw *hw = sky2->hw;
4266	u16 reg2;
4267
4268	reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
4269	return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
4270}
4271
4272static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4273			   u8 *data)
4274{
4275	struct sky2_port *sky2 = netdev_priv(dev);
4276	int rc;
4277
4278	eeprom->magic = SKY2_EEPROM_MAGIC;
4279	rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
4280			      data);
4281	if (rc < 0)
4282		return rc;
4283
4284	eeprom->len = rc;
4285
4286	return 0;
4287}
4288
4289static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4290			   u8 *data)
4291{
4292	struct sky2_port *sky2 = netdev_priv(dev);
4293	int rc;
4294
4295	if (eeprom->magic != SKY2_EEPROM_MAGIC)
4296		return -EINVAL;
4297
4298	rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
4299			       data);
4300
4301	return rc < 0 ? rc : 0;
4302}
4303
4304static netdev_features_t sky2_fix_features(struct net_device *dev,
4305	netdev_features_t features)
4306{
4307	const struct sky2_port *sky2 = netdev_priv(dev);
4308	const struct sky2_hw *hw = sky2->hw;
4309
4310	/* In order to do Jumbo packets on these chips, need to turn off the
4311	 * transmit store/forward. Therefore checksum offload won't work.
4312	 */
4313	if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
4314		netdev_info(dev, "checksum offload not possible with jumbo frames\n");
4315		features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_CSUM_MASK);
4316	}
4317
4318	/* Some hardware requires receive checksum for RSS to work. */
4319	if ( (features & NETIF_F_RXHASH) &&
4320	     !(features & NETIF_F_RXCSUM) &&
4321	     (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
4322		netdev_info(dev, "receive hashing forces receive checksum\n");
4323		features |= NETIF_F_RXCSUM;
4324	}
4325
4326	return features;
4327}
4328
4329static int sky2_set_features(struct net_device *dev, netdev_features_t features)
4330{
4331	struct sky2_port *sky2 = netdev_priv(dev);
4332	netdev_features_t changed = dev->features ^ features;
4333
4334	if ((changed & NETIF_F_RXCSUM) &&
4335	    !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
4336		sky2_write32(sky2->hw,
4337			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
4338			     (features & NETIF_F_RXCSUM)
4339			     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
4340	}
4341
4342	if (changed & NETIF_F_RXHASH)
4343		rx_set_rss(dev, features);
4344
4345	if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4346		sky2_vlan_mode(dev, features);
4347
4348	return 0;
4349}
4350
/* ethtool entry points; only the coalescing parameters listed in
 * supported_coalesce_params are accepted by the core.
 */
static const struct ethtool_ops sky2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_RX_USECS_IRQ |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ,
	.get_drvinfo	= sky2_get_drvinfo,
	.get_wol	= sky2_get_wol,
	.set_wol	= sky2_set_wol,
	.get_msglevel	= sky2_get_msglevel,
	.set_msglevel	= sky2_set_msglevel,
	.nway_reset	= sky2_nway_reset,
	.get_regs_len	= sky2_get_regs_len,
	.get_regs	= sky2_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= sky2_get_eeprom_len,
	.get_eeprom	= sky2_get_eeprom,
	.set_eeprom	= sky2_set_eeprom,
	.get_strings	= sky2_get_strings,
	.get_coalesce	= sky2_get_coalesce,
	.set_coalesce	= sky2_set_coalesce,
	.get_ringparam	= sky2_get_ringparam,
	.set_ringparam	= sky2_set_ringparam,
	.get_pauseparam = sky2_get_pauseparam,
	.set_pauseparam = sky2_set_pauseparam,
	.set_phys_id	= sky2_set_phys_id,
	.get_sset_count = sky2_get_sset_count,
	.get_ethtool_stats = sky2_get_ethtool_stats,
	.get_link_ksettings = sky2_get_link_ksettings,
	.set_link_ksettings = sky2_set_link_ksettings,
};
4381
4382#ifdef CONFIG_SKY2_DEBUG
4383
/* Root of the driver's debugfs directory (one file per netdev) */
static struct dentry *sky2_debug;
4385
/* debugfs read: dump interrupt state, the status ring, and the Tx/Rx
 * ring state for one port.  NAPI is paused while the rings are walked.
 */
static int sky2_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	unsigned idx, last;
	int sop;	/* nonzero at start of a packet's list elements */

	seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
		   sky2_read32(hw, B0_ISRC),
		   sky2_read32(hw, B0_IMSK),
		   sky2_read32(hw, B0_Y2_SP_ICR));

	if (!netif_running(dev)) {
		seq_puts(seq, "network not running\n");
		return 0;
	}

	/* Keep NAPI from consuming ring entries while we walk them */
	napi_disable(&hw->napi);
	last = sky2_read16(hw, STAT_PUT_IDX);

	seq_printf(seq, "Status ring %u\n", hw->st_size);
	if (hw->st_idx == last)
		seq_puts(seq, "Status ring (empty)\n");
	else {
		seq_puts(seq, "Status ring\n");
		for (idx = hw->st_idx; idx != last && idx < hw->st_size;
		     idx = RING_NEXT(idx, hw->st_size)) {
			const struct sky2_status_le *le = hw->st_le + idx;
			seq_printf(seq, "[%d] %#x %d %#x\n",
				   idx, le->opcode, le->length, le->status);
		}
		seq_puts(seq, "\n");
	}

	seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
		   sky2->tx_cons, sky2->tx_prod,
		   sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
		   sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));

	/* Dump contents of tx ring */
	sop = 1;
	for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
	     idx = RING_NEXT(idx, sky2->tx_ring_size)) {
		const struct sky2_tx_le *le = sky2->tx_le + idx;
		u32 a = le32_to_cpu(le->addr);

		if (sop)
			seq_printf(seq, "%u:", idx);
		sop = 0;

		/* Decode the list element by opcode (ownership bit masked off) */
		switch (le->opcode & ~HW_OWNER) {
		case OP_ADDR64:
			seq_printf(seq, " %#x:", a);
			break;
		case OP_LRGLEN:
			seq_printf(seq, " mtu=%d", a);
			break;
		case OP_VLAN:
			seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
			break;
		case OP_TCPLISW:
			seq_printf(seq, " csum=%#x", a);
			break;
		case OP_LARGESEND:
			seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_PACKET:
			seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
			break;
		case OP_BUFFER:
			seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
			break;
		default:
			seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
				   a, le16_to_cpu(le->length));
		}

		/* End of packet: terminate the output line */
		if (le->ctrl & EOP) {
			seq_putc(seq, '\n');
			sop = 1;
		}
	}

	seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
		   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));

	sky2_read32(hw, B0_Y2_SP_LISR);
	napi_enable(&hw->napi);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sky2_debug);
4481
4482/*
4483 * Use network device events to create/remove/rename
4484 * debugfs file entries
4485 */
4486static int sky2_device_event(struct notifier_block *unused,
4487			     unsigned long event, void *ptr)
4488{
4489	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4490	struct sky2_port *sky2 = netdev_priv(dev);
4491
4492	if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
4493		return NOTIFY_DONE;
4494
4495	switch (event) {
4496	case NETDEV_CHANGENAME:
4497		if (sky2->debugfs) {
4498			sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
4499						       sky2_debug, dev->name);
4500		}
4501		break;
4502
4503	case NETDEV_GOING_DOWN:
4504		if (sky2->debugfs) {
4505			netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
4506			debugfs_remove(sky2->debugfs);
4507			sky2->debugfs = NULL;
4508		}
4509		break;
4510
4511	case NETDEV_UP:
4512		sky2->debugfs = debugfs_create_file(dev->name, 0444,
4513						    sky2_debug, dev,
4514						    &sky2_debug_fops);
4515		if (IS_ERR(sky2->debugfs))
4516			sky2->debugfs = NULL;
4517	}
4518
4519	return NOTIFY_DONE;
4520}
4521
/* Netdev notifier used to track rename/up/down for the debugfs files */
static struct notifier_block sky2_notifier = {
	.notifier_call = sky2_device_event,
};
4525
4526
/* Create the debugfs root directory and register the netdev notifier
 * that manages per-device entries.  Failure is non-fatal: debugfs
 * output is simply unavailable.
 */
static __init void sky2_debug_init(void)
{
	struct dentry *ent;

	ent = debugfs_create_dir("sky2", NULL);
	if (!ent || IS_ERR(ent))
		return;

	sky2_debug = ent;
	register_netdevice_notifier(&sky2_notifier);
}
4538
4539static __exit void sky2_debug_cleanup(void)
4540{
4541	if (sky2_debug) {
4542		unregister_netdevice_notifier(&sky2_notifier);
4543		debugfs_remove(sky2_debug);
4544		sky2_debug = NULL;
4545	}
4546}
4547
4548#else
4549#define sky2_debug_init()
4550#define sky2_debug_cleanup()
4551#endif
4552
/* Two copies of network device operations to handle special case of
 * not allowing netpoll on second port: entry [0] is for the first
 * port (with ndo_poll_controller), entry [1] for the second.
 */
static const struct net_device_ops sky2_netdev_ops[2] = {
  {
	.ndo_open		= sky2_open,
	.ndo_stop		= sky2_close,
	.ndo_start_xmit		= sky2_xmit_frame,
	.ndo_eth_ioctl		= sky2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= sky2_set_mac_address,
	.ndo_set_rx_mode	= sky2_set_multicast,
	.ndo_change_mtu		= sky2_change_mtu,
	.ndo_fix_features	= sky2_fix_features,
	.ndo_set_features	= sky2_set_features,
	.ndo_tx_timeout		= sky2_tx_timeout,
	.ndo_get_stats64	= sky2_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sky2_netpoll,
#endif
  },
  {
	.ndo_open		= sky2_open,
	.ndo_stop		= sky2_close,
	.ndo_start_xmit		= sky2_xmit_frame,
	.ndo_eth_ioctl		= sky2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= sky2_set_mac_address,
	.ndo_set_rx_mode	= sky2_set_multicast,
	.ndo_change_mtu		= sky2_change_mtu,
	.ndo_fix_features	= sky2_fix_features,
	.ndo_set_features	= sky2_set_features,
	.ndo_tx_timeout		= sky2_tx_timeout,
	.ndo_get_stats64	= sky2_get_stats,
  },
};
4589
/* Allocate and initialize the network device for one port.
 * @highmem: nonzero when 64-bit DMA is in use (sets NETIF_F_HIGHDMA)
 * @wol: initial Wake-on-LAN configuration
 *
 * Returns the netdev (not yet registered) or NULL on allocation failure.
 */
static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
					   int highmem, int wol)
{
	struct sky2_port *sky2;
	struct net_device *dev = alloc_etherdev(sizeof(*sky2));
	int ret;

	if (!dev)
		return NULL;

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->irq = hw->pdev->irq;
	dev->ethtool_ops = &sky2_ethtool_ops;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->netdev_ops = &sky2_netdev_ops[port];

	sky2 = netdev_priv(dev);
	sky2->netdev = dev;
	sky2->hw = hw;
	sky2->msg_enable = netif_msg_init(debug, default_msg);

	u64_stats_init(&sky2->tx_stats.syncp);
	u64_stats_init(&sky2->rx_stats.syncp);

	/* Auto speed and flow control */
	sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
	if (hw->chip_id != CHIP_ID_YUKON_XL)
		dev->hw_features |= NETIF_F_RXCSUM;

	sky2->flow_mode = FC_BOTH;

	/* -1 means "not yet negotiated" */
	sky2->duplex = -1;
	sky2->speed = -1;
	sky2->advertising = sky2_supported_modes(hw);
	sky2->wol = wol;

	spin_lock_init(&sky2->phy_lock);

	sky2->tx_pending = TX_DEF_PENDING;
	sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
	sky2->rx_pending = RX_DEF_PENDING;

	hw->dev[port] = dev;

	sky2->port = port;

	dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	/* Enable receive hashing unless hardware is known broken */
	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
		dev->hw_features |= NETIF_F_RXHASH;

	if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->vlan_features |= SKY2_VLAN_OFFLOADS;
	}

	dev->features |= dev->hw_features;

	/* MTU range: 60 - 1500 or 9000 (FE/FE+ chips have no jumbo support) */
	dev->min_mtu = ETH_ZLEN;
	if (hw->chip_id == CHIP_ID_YUKON_FE ||
	    hw->chip_id == CHIP_ID_YUKON_FE_P)
		dev->max_mtu = ETH_DATA_LEN;
	else
		dev->max_mtu = ETH_JUMBO_MTU;

	/* try to get mac address in the following order:
	 * 1) from device tree data
	 * 2) from internal registers set by bootloader
	 */
	ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev);
	if (ret) {
		u8 addr[ETH_ALEN];

		memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
		eth_hw_addr_set(dev, addr);
	}

	/* if the address is invalid, use a random value */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		struct sockaddr sa = { AF_UNSPEC };

		dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n");
		eth_hw_addr_random(dev);
		memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
		if (sky2_set_mac_address(dev, &sa))
			dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n");
	}

	return dev;
}
4687
/* Log the port's MAC address at probe time (gated by msg_enable) */
static void sky2_show_addr(struct net_device *dev)
{
	const struct sky2_port *sky2 = netdev_priv(dev);

	netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
}
4694
/* Handle software interrupt used during MSI test */
static irqreturn_t sky2_test_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);

	if (status == 0)
		return IRQ_NONE;

	/* Software IRQ was delivered: record success, wake the waiter
	 * in sky2_test_msi(), and clear the software IRQ bit.
	 */
	if (status & Y2_IS_IRQ_SW) {
		hw->flags |= SKY2_HW_USE_MSI;
		wake_up(&hw->msi_wait);
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}
	sky2_write32(hw, B0_Y2_SP_ICR, 2);

	return IRQ_HANDLED;
}
4713
/* Test interrupt path by forcing a software IRQ.
 * Returns 0 if the MSI was delivered, -EOPNOTSUPP if not (caller
 * falls back to INTx), or the request_irq() error.
 */
static int sky2_test_msi(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	int err;

	init_waitqueue_head(&hw->msi_wait);

	/* Temporary handler that sets SKY2_HW_USE_MSI on delivery */
	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
	if (err) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
		return err;
	}

	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);

	/* Trigger a software interrupt; the read flushes the write */
	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
	sky2_read8(hw, B0_CTST);

	/* Give the interrupt a tenth of a second to arrive */
	wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);

	if (!(hw->flags & SKY2_HW_USE_MSI)) {
		/* MSI test failed, go back to INTx mode */
		dev_info(&pdev->dev, "No interrupt generated using MSI, "
			 "switching to INTx mode.\n");

		err = -EOPNOTSUPP;
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}

	/* Mask all interrupts again; read back to flush */
	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);

	free_irq(pdev->irq, hw);

	return err;
}
4751
4752/* This driver supports yukon2 chipset only */
4753static const char *sky2_name(u8 chipid, char *buf, int sz)
4754{
4755	static const char *const name[] = {
4756		"XL",		/* 0xb3 */
4757		"EC Ultra", 	/* 0xb4 */
4758		"Extreme",	/* 0xb5 */
4759		"EC",		/* 0xb6 */
4760		"FE",		/* 0xb7 */
4761		"FE+",		/* 0xb8 */
4762		"Supreme",	/* 0xb9 */
4763		"UL 2",		/* 0xba */
4764		"Unknown",	/* 0xbb */
4765		"Optima",	/* 0xbc */
4766		"OptimaEEE",    /* 0xbd */
4767		"Optima 2",	/* 0xbe */
4768	};
4769
4770	if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
4771		snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]);
4772	else
4773		snprintf(buf, sz, "(chip %#x)", chipid);
4774	return buf;
4775}
4776
/* Systems on which MSI is disabled by default for this device; the
 * driver falls back to INTx there (see disable_msi handling in probe).
 */
static const struct dmi_system_id msi_blacklist[] = {
	{
		.ident = "Dell Inspiron 1545",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
		},
	},
	{
		.ident = "Gateway P-79",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
			DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
		},
	},
	{
		.ident = "ASUS P5W DH Deluxe",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
		},
	},
	{
		.ident = "ASUS P6T",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "P6T"),
		},
	},
	{
		.ident = "ASUS P6X",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "P6X"),
		},
	},
	{}
};
4815
/* Probe one PCI device: map registers, allocate the shared status
 * ring, create and register one netdev per port, and use MSI when it
 * works and the platform is not blacklisted.  Errors unwind through
 * the goto ladder in reverse order of acquisition.
 */
static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct sky2_hw *hw;
	int err, using_dac = 0, wol_default;
	u32 reg;
	char buf1[16];

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_out;
	}

	/* Get configuration information
	 * Note: only regular PCI config access once to test for HW issues
	 *       other PCI access through shared memory for speed and to
	 *	 avoid MMCONFIG problems.
	 */
	err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
	if (err) {
		dev_err(&pdev->dev, "PCI read config failed\n");
		goto err_out_disable;
	}

	/* All-ones means the device did not respond to the read */
	if (~reg == 0) {
		dev_err(&pdev->dev, "PCI configuration read error\n");
		err = -EIO;
		goto err_out_disable;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA where dma_addr_t is wide enough; otherwise
	 * fall back to a 32-bit mask.
	 */
	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		using_dac = 1;
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (err < 0) {
			dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
				"for consistent allocations\n");
			goto err_out_free_regions;
		}
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_out_free_regions;
		}
	}


#ifdef __BIG_ENDIAN
	/* The sk98lin vendor driver uses hardware byte swapping but
	 * this driver uses software swapping.
	 */
	reg &= ~PCI_REV_DESC;
	err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	if (err) {
		dev_err(&pdev->dev, "PCI write config failed\n");
		goto err_out_free_regions;
	}
#endif

	wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;

	err = -ENOMEM;

	/* Over-allocate so the trailing irq_name buffer can hold the
	 * "sky2@pci:<name>" string built below.
	 */
	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
	if (!hw)
		goto err_out_free_regions;

	hw->pdev = pdev;
	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));

	hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

	err = sky2_init(hw);
	if (err)
		goto err_out_iounmap;

	/* ring for status responses */
	hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
	hw->st_le = dma_alloc_coherent(&pdev->dev,
				       hw->st_size * sizeof(struct sky2_status_le),
				       &hw->st_dma, GFP_KERNEL);
	if (!hw->st_le) {
		err = -ENOMEM;
		goto err_out_reset;
	}

	dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
		 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);

	sky2_reset(hw);

	dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_pci;
	}

	/* Module parameter -1 means "auto": consult the DMI blacklist */
	if (disable_msi == -1)
		disable_msi = !!dmi_check_system(msi_blacklist);

	if (!disable_msi && pci_enable_msi(pdev) == 0) {
		err = sky2_test_msi(hw);
		if (err) {
			pci_disable_msi(pdev);
			/* -EOPNOTSUPP just means "use INTx instead" */
			if (err != -EOPNOTSUPP)
				goto err_out_free_netdev;
		}
	}

	netif_napi_add(dev, &hw->napi, sky2_poll);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	netif_carrier_off(dev);

	sky2_show_addr(dev);

	/* Second port: extra netdev plus shared IRQ setup */
	if (hw->ports > 1) {
		dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
		if (!dev1) {
			err = -ENOMEM;
			goto err_out_unregister;
		}

		err = register_netdev(dev1);
		if (err) {
			dev_err(&pdev->dev, "cannot register second net device\n");
			goto err_out_free_dev1;
		}

		err = sky2_setup_irq(hw, hw->irq_name);
		if (err)
			goto err_out_unregister_dev1;

		sky2_show_addr(dev1);
	}

	timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
	INIT_WORK(&hw->restart_work, sky2_restart);

	pci_set_drvdata(pdev, hw);
	/* longer-than-default settle time after D3hot transitions */
	pdev->d3hot_delay = 300;

	return 0;

err_out_unregister_dev1:
	unregister_netdev(dev1);
err_out_free_dev1:
	free_netdev(dev1);
err_out_unregister:
	unregister_netdev(dev);
err_out_free_netdev:
	if (hw->flags & SKY2_HW_USE_MSI)
		pci_disable_msi(pdev);
	free_netdev(dev);
err_out_free_pci:
	dma_free_coherent(&pdev->dev,
			  hw->st_size * sizeof(struct sky2_status_le),
			  hw->st_le, hw->st_dma);
err_out_reset:
	sky2_write8(hw, B0_CTST, CS_RST_SET);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out:
	return err;
}
5007
/* Tear down both ports and release all resources (reverse of probe) */
static void sky2_remove(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i;

	if (!hw)
		return;

	/* Stop deferred work; the watchdog timer is never re-armed */
	timer_shutdown_sync(&hw->watchdog_timer);
	cancel_work_sync(&hw->restart_work);

	/* Unregister in reverse order of registration */
	for (i = hw->ports-1; i >= 0; --i)
		unregister_netdev(hw->dev[i]);

	/* Mask all interrupts; read back to flush the posted write */
	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);

	sky2_power_aux(hw);

	/* Put the chip into reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_read8(hw, B0_CTST);

	/* The IRQ was requested at probe time only for dual port cards
	 * (see sky2_setup_irq() call in sky2_probe()).
	 */
	if (hw->ports > 1) {
		napi_disable(&hw->napi);
		free_irq(pdev->irq, hw);
	}

	if (hw->flags & SKY2_HW_USE_MSI)
		pci_disable_msi(pdev);
	dma_free_coherent(&pdev->dev,
			  hw->st_size * sizeof(struct sky2_status_le),
			  hw->st_le, hw->st_dma);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	for (i = hw->ports-1; i >= 0; --i)
		free_netdev(hw->dev[i]);

	iounmap(hw->regs);
	kfree(hw);
}
5049
5050static int sky2_suspend(struct device *dev)
5051{
5052	struct sky2_hw *hw = dev_get_drvdata(dev);
5053	int i;
5054
5055	if (!hw)
5056		return 0;
5057
5058	del_timer_sync(&hw->watchdog_timer);
5059	cancel_work_sync(&hw->restart_work);
5060
5061	rtnl_lock();
5062
5063	sky2_all_down(hw);
5064	for (i = 0; i < hw->ports; i++) {
5065		struct net_device *dev = hw->dev[i];
5066		struct sky2_port *sky2 = netdev_priv(dev);
5067
5068		if (sky2->wol)
5069			sky2_wol_init(sky2);
5070	}
5071
5072	sky2_power_aux(hw);
5073	rtnl_unlock();
5074
5075	return 0;
5076}
5077
5078#ifdef CONFIG_PM_SLEEP
/* System resume: re-enable clocks, reset the chip, and bring any
 * previously running ports back up.
 */
static int sky2_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int err;

	if (!hw)
		return 0;

	/* Re-enable all clocks */
	err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
	if (err) {
		dev_err(&pdev->dev, "PCI write config failed\n");
		goto out;
	}

	rtnl_lock();
	sky2_reset(hw);
	sky2_all_up(hw);
	rtnl_unlock();

	return 0;
out:

	dev_err(&pdev->dev, "resume failed (%d)\n", err);
	pci_disable_device(pdev);
	return err;
}
5107
/* Standard system suspend/resume hooks (no runtime PM) */
static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
#define SKY2_PM_OPS (&sky2_pm_ops)
5110
5111#else
5112
5113#define SKY2_PM_OPS NULL
5114#endif
5115
/* System shutdown/poweroff: close running ports, arm WoL via
 * sky2_suspend(), and drop the device into D3hot with wake enabled
 * when configured.
 */
static void sky2_shutdown(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int port;

	for (port = 0; port < hw->ports; port++) {
		struct net_device *ndev = hw->dev[port];

		rtnl_lock();
		if (netif_running(ndev)) {
			dev_close(ndev);
			netif_device_detach(ndev);
		}
		rtnl_unlock();
	}
	sky2_suspend(&pdev->dev);
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	pci_set_power_state(pdev, PCI_D3hot);
}
5135
/* PCI driver registration glue */
static struct pci_driver sky2_driver = {
	.name = DRV_NAME,
	.id_table = sky2_id_table,
	.probe = sky2_probe,
	.remove = sky2_remove,
	.shutdown = sky2_shutdown,
	.driver.pm = SKY2_PM_OPS,
};
5144
/* Module load: set up debugfs (when enabled) and register the driver */
static int __init sky2_init_module(void)
{
	pr_info("driver version " DRV_VERSION "\n");

	sky2_debug_init();
	return pci_register_driver(&sky2_driver);
}
5152
/* Module unload: unregister the driver, then tear down debugfs */
static void __exit sky2_cleanup_module(void)
{
	pci_unregister_driver(&sky2_driver);
	sky2_debug_cleanup();
}
5158
5159module_init(sky2_init_module);
5160module_exit(sky2_cleanup_module);
5161
5162MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
5163MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
5164MODULE_LICENSE("GPL");
5165MODULE_VERSION(DRV_VERSION);