   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
   4 *
   5 * Derived from Intel e1000 driver
   6 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   7 */
   8
   9#include "atl1c.h"
  10
  11char atl1c_driver_name[] = "atl1c";
  12
  13/*
  14 * atl1c_pci_tbl - PCI Device ID Table
  15 *
  16 * Wildcard entries (PCI_ANY_ID) should come last
  17 * Last entry must be all 0s
  18 *
  19 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  20 *   Class, Class Mask, private data (not used) }
  21 */
  22static const struct pci_device_id atl1c_pci_tbl[] = {
  23	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
  24	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
  25	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
  26	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
  27	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
  28	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
  29	/* required last entry */
  30	{ 0 }
  31};
  32MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
  33
  34MODULE_AUTHOR("Jie Yang");
  35MODULE_AUTHOR("Qualcomm Atheros Inc.");
  36MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
  37MODULE_LICENSE("GPL");
  38
  39struct atl1c_qregs {
  40	u16 tpd_addr_lo;
  41	u16 tpd_prod;
  42	u16 tpd_cons;
  43	u16 rfd_addr_lo;
  44	u16 rrd_addr_lo;
  45	u16 rfd_prod;
  46	u32 tx_isr;
  47	u32 rx_isr;
  48};
  49
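/*
 * Per-queue register map: each entry bundles the register offsets and ISR
 * bits for one TX/RX queue, so queue handling code can be written once and
 * indexed by queue number, e.g. AT_WRITE_REG(hw, atl1c_qregs[queue].rfd_prod, ...).
 */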
  50static struct atl1c_qregs atl1c_qregs[AT_MAX_TRANSMIT_QUEUE] = {
  51	{
  52		REG_TPD_PRI0_ADDR_LO, REG_TPD_PRI0_PIDX, REG_TPD_PRI0_CIDX,
  53		REG_RFD0_HEAD_ADDR_LO, REG_RRD0_HEAD_ADDR_LO,
  54		REG_MB_RFD0_PROD_IDX, ISR_TX_PKT_0, ISR_RX_PKT_0
  55	},
  56	{
  57		REG_TPD_PRI1_ADDR_LO, REG_TPD_PRI1_PIDX, REG_TPD_PRI1_CIDX,
  58		REG_RFD1_HEAD_ADDR_LO, REG_RRD1_HEAD_ADDR_LO,
  59		REG_MB_RFD1_PROD_IDX, ISR_TX_PKT_1, ISR_RX_PKT_1
  60	},
  61	{
  62		REG_TPD_PRI2_ADDR_LO, REG_TPD_PRI2_PIDX, REG_TPD_PRI2_CIDX,
  63		REG_RFD2_HEAD_ADDR_LO, REG_RRD2_HEAD_ADDR_LO,
  64		REG_MB_RFD2_PROD_IDX, ISR_TX_PKT_2, ISR_RX_PKT_2
  65	},
  66	{
  67		REG_TPD_PRI3_ADDR_LO, REG_TPD_PRI3_PIDX, REG_TPD_PRI3_CIDX,
  68		REG_RFD3_HEAD_ADDR_LO, REG_RRD3_HEAD_ADDR_LO,
  69		REG_MB_RFD3_PROD_IDX, ISR_TX_PKT_3, ISR_RX_PKT_3
  70	},
  71};
  72
  73static int atl1c_stop_mac(struct atl1c_hw *hw);
  74static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
  75static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
  76static void atl1c_start_mac(struct atl1c_adapter *adapter);
  77static int atl1c_up(struct atl1c_adapter *adapter);
  78static void atl1c_down(struct atl1c_adapter *adapter);
  79static int atl1c_reset_mac(struct atl1c_hw *hw);
  80static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
  81static int atl1c_configure(struct atl1c_adapter *adapter);
  82static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
  83				 bool napi_mode);
  84
  85
  86static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  87	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
  88static void atl1c_pcie_patch(struct atl1c_hw *hw)
  89{
  90	u32 mst_data, data;
  91
  92	/* pclk sel could switch to 25M */
  93	AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
  94	mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
  95	AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
  96
  97	/* WoL/PCIE related settings */
  98	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
  99		AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
 100		data |= PCIE_PHYMISC_FORCE_RCV_DET;
 101		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
  102	} else { /* newer devices: set bit5 (WAKEN_25M) of MASTER */
 103		if (!(mst_data & MASTER_CTRL_WAKEN_25M))
 104			AT_WRITE_REG(hw, REG_MASTER_CTRL,
 105				mst_data | MASTER_CTRL_WAKEN_25M);
 106	}
 107	/* aspm/PCIE setting only for l2cb 1.0 */
 108	if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
 109		AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
 110		data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
 111			L2CB1_PCIE_PHYMISC2_CDR_BW);
 112		data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
 113			L2CB1_PCIE_PHYMISC2_L0S_TH);
 114		AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
 115		/* extend L1 sync timer */
 116		AT_READ_REG(hw, REG_LINK_CTRL, &data);
 117		data |= LINK_CTRL_EXT_SYNC;
 118		AT_WRITE_REG(hw, REG_LINK_CTRL, data);
 119	}
 120	/* l2cb 1.x & l1d 1.x */
 121	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
 122		AT_READ_REG(hw, REG_PM_CTRL, &data);
 123		data |= PM_CTRL_L0S_BUFSRX_EN;
 124		AT_WRITE_REG(hw, REG_PM_CTRL, data);
 125		/* clear vendor msg */
 126		AT_READ_REG(hw, REG_DMA_DBG, &data);
 127		AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
 128	}
 129}
 130
  131/* FIXME: no longer needed? */
  132/*
  133 * atl1c_reset_pcie - reset PCIE module
 134 */
 135static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
 136{
 137	u32 data;
 138	u32 pci_cmd;
 139	struct pci_dev *pdev = hw->adapter->pdev;
 140	int pos;
 141
 142	AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
 143	pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
 144	pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
 145		PCI_COMMAND_IO);
 146	AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
 147
 148	/*
  149	 * Clear any power-saving settings
 150	 */
 151	pci_enable_wake(pdev, PCI_D3hot, 0);
 152	pci_enable_wake(pdev, PCI_D3cold, 0);
 153	/* wol sts read-clear */
 154	AT_READ_REG(hw, REG_WOL_CTRL, &data);
 155	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
 156
 157	/*
 158	 * Mask some pcie error bits
 159	 */
 160	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 161	if (pos) {
 162		pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
 163		data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
 164		pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
 165	}
 166	/* clear error status */
 167	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
 168			PCI_EXP_DEVSTA_NFED |
 169			PCI_EXP_DEVSTA_FED |
 170			PCI_EXP_DEVSTA_CED |
 171			PCI_EXP_DEVSTA_URD);
 172
 173	AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
 174	data &= ~LTSSM_ID_EN_WRO;
 175	AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
 176
 177	atl1c_pcie_patch(hw);
 178	if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
 179		atl1c_disable_l0s_l1(hw);
 180
 181	msleep(5);
 182}
 183
 184/**
 185 * atl1c_irq_enable - Enable default interrupt generation settings
 186 * @adapter: board private structure
 187 */
 188static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
 189{
 190	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
 191		AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
 192		AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
 193		AT_WRITE_FLUSH(&adapter->hw);
 194	}
 195}
 196
 197/**
 198 * atl1c_irq_disable - Mask off interrupt generation on the NIC
 199 * @adapter: board private structure
 200 */
 201static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
 202{
 203	atomic_inc(&adapter->irq_sem);
 204	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
 205	AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
 206	AT_WRITE_FLUSH(&adapter->hw);
 207	synchronize_irq(adapter->pdev->irq);
 208}
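/*
 * Note on irq_sem: atl1c_irq_disable() increments the counter while
 * atl1c_irq_enable() only unmasks interrupts when atomic_dec_and_test()
 * reaches zero, so nested disables need a matching number of enables.
 * irq_sem starts at 1 (see atl1c_sw_init()), so the first enable call is
 * the one that actually unmasks interrupts.
 */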
 209
 210/*
 211 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
 212 * of the idle status register until the device is actually idle
 213 */
 214static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
 215{
 216	int timeout;
 217	u32 data;
 218
 219	for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
 220		AT_READ_REG(hw, REG_IDLE_STATUS, &data);
 221		if ((data & modu_ctrl) == 0)
 222			return 0;
 223		msleep(1);
 224	}
 225	return data;
 226}
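/*
 * Note: atl1c_wait_until_idle() returns 0 once the polled IDLE_STATUS bits
 * clear, or the last status value read if the hardware never goes idle
 * within AT_HW_MAX_IDLE_DELAY polls; callers such as atl1c_reset_mac()
 * treat any non-zero return as an error.
 */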
 227
 228/**
 229 * atl1c_phy_config - Timer Call-back
  230 * @t: pointer to the timer_list within the adapter's private structure
 231 */
 232static void atl1c_phy_config(struct timer_list *t)
 233{
 234	struct atl1c_adapter *adapter = from_timer(adapter, t,
 235						   phy_config_timer);
 236	struct atl1c_hw *hw = &adapter->hw;
 237	unsigned long flags;
 238
 239	spin_lock_irqsave(&adapter->mdio_lock, flags);
 240	atl1c_restart_autoneg(hw);
 241	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 242}
 243
 244void atl1c_reinit_locked(struct atl1c_adapter *adapter)
 245{
 246	atl1c_down(adapter);
 247	atl1c_up(adapter);
 248	clear_bit(__AT_RESETTING, &adapter->flags);
 249}
 250
 251static void atl1c_check_link_status(struct atl1c_adapter *adapter)
 252{
 253	struct atl1c_hw *hw = &adapter->hw;
 254	struct net_device *netdev = adapter->netdev;
 255	struct pci_dev    *pdev   = adapter->pdev;
 256	int err;
 257	unsigned long flags;
 258	u16 speed, duplex;
 259	bool link;
 260
 261	spin_lock_irqsave(&adapter->mdio_lock, flags);
 262	link = atl1c_get_link_status(hw);
 263	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 264
 265	if (!link) {
 266		/* link down */
 267		netif_carrier_off(netdev);
 268		hw->hibernate = true;
 269		if (atl1c_reset_mac(hw) != 0)
 270			if (netif_msg_hw(adapter))
 271				dev_warn(&pdev->dev, "reset mac failed\n");
 272		atl1c_set_aspm(hw, SPEED_0);
 273		atl1c_post_phy_linkchg(hw, SPEED_0);
 274		atl1c_reset_dma_ring(adapter);
 275		atl1c_configure(adapter);
 276	} else {
 277		/* Link Up */
 278		hw->hibernate = false;
 279		spin_lock_irqsave(&adapter->mdio_lock, flags);
 280		err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
 281		spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 282		if (unlikely(err))
 283			return;
  284		/* the negotiated result becomes our current setting */
 285		if (adapter->link_speed != speed ||
 286		    adapter->link_duplex != duplex) {
 287			adapter->link_speed  = speed;
 288			adapter->link_duplex = duplex;
 289			atl1c_set_aspm(hw, speed);
 290			atl1c_post_phy_linkchg(hw, speed);
 291			atl1c_start_mac(adapter);
 292			if (netif_msg_link(adapter))
 293				dev_info(&pdev->dev,
 294					"%s: %s NIC Link is Up<%d Mbps %s>\n",
 295					atl1c_driver_name, netdev->name,
 296					adapter->link_speed,
 297					adapter->link_duplex == FULL_DUPLEX ?
 298					"Full Duplex" : "Half Duplex");
 299		}
 300		if (!netif_carrier_ok(netdev))
 301			netif_carrier_on(netdev);
 302	}
 303}
 304
 305static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
 306{
 307	struct net_device *netdev = adapter->netdev;
 308	struct pci_dev    *pdev   = adapter->pdev;
 309	bool link;
 310
 311	spin_lock(&adapter->mdio_lock);
 312	link = atl1c_get_link_status(&adapter->hw);
 313	spin_unlock(&adapter->mdio_lock);
 314	/* notify upper layer link down ASAP */
 315	if (!link) {
 316		if (netif_carrier_ok(netdev)) {
 317			/* old link state: Up */
 318			netif_carrier_off(netdev);
 319			if (netif_msg_link(adapter))
 320				dev_info(&pdev->dev,
 321					"%s: %s NIC Link is Down\n",
 322					atl1c_driver_name, netdev->name);
 323			adapter->link_speed = SPEED_0;
 324		}
 325	}
 326
 327	set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
 328	schedule_work(&adapter->common_task);
 329}
 330
 331static void atl1c_common_task(struct work_struct *work)
 332{
 333	struct atl1c_adapter *adapter;
 334	struct net_device *netdev;
 335
 336	adapter = container_of(work, struct atl1c_adapter, common_task);
 337	netdev = adapter->netdev;
 338
 339	if (test_bit(__AT_DOWN, &adapter->flags))
 340		return;
 341
 342	if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
 343		netif_device_detach(netdev);
 344		atl1c_down(adapter);
 345		atl1c_up(adapter);
 346		netif_device_attach(netdev);
 347	}
 348
 349	if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
 350		&adapter->work_event)) {
 351		atl1c_irq_disable(adapter);
 352		atl1c_check_link_status(adapter);
 353		atl1c_irq_enable(adapter);
 354	}
 355}
 356
 357
 358static void atl1c_del_timer(struct atl1c_adapter *adapter)
 359{
 360	del_timer_sync(&adapter->phy_config_timer);
 361}
 362
 363
 364/**
 365 * atl1c_tx_timeout - Respond to a Tx Hang
 366 * @netdev: network interface device structure
 367 * @txqueue: index of hanging tx queue
 368 */
 369static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 370{
 371	struct atl1c_adapter *adapter = netdev_priv(netdev);
 372
 373	/* Do the reset outside of interrupt context */
 374	set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
 375	schedule_work(&adapter->common_task);
 376}
 377
 378/**
 379 * atl1c_set_multi - Multicast and Promiscuous mode set
 380 * @netdev: network interface device structure
 381 *
 382 * The set_multi entry point is called whenever the multicast address
 383 * list or the network interface flags are updated.  This routine is
 384 * responsible for configuring the hardware for proper multicast,
 385 * promiscuous mode, and all-multi behavior.
 386 */
 387static void atl1c_set_multi(struct net_device *netdev)
 388{
 389	struct atl1c_adapter *adapter = netdev_priv(netdev);
 390	struct atl1c_hw *hw = &adapter->hw;
 391	struct netdev_hw_addr *ha;
 392	u32 mac_ctrl_data;
 393	u32 hash_value;
 394
 395	/* Check for Promiscuous and All Multicast modes */
 396	AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
 397
 398	if (netdev->flags & IFF_PROMISC) {
 399		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
 400	} else if (netdev->flags & IFF_ALLMULTI) {
 401		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
 402		mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
 403	} else {
 404		mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
 405	}
 406
 407	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
 408
 409	/* clear the old settings from the multicast hash table */
 410	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
 411	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
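	/*
	 * The multicast filter is a 64-bit hash table split across two
	 * 32-bit registers: word 0 is written directly, word 1 via
	 * AT_WRITE_REG_ARRAY.  atl1c_hash_mc_addr() presumably derives the
	 * bucket index from a CRC32 of the address (MAC_CTRL_HASH_ALG_CRC32
	 * is selected in atl1c_start_mac()) -- an inference from this file,
	 * not a verified datasheet detail.
	 */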
 412
  413	/* compute mc addresses' hash value, and put it into the hash table */
 414	netdev_for_each_mc_addr(ha, netdev) {
 415		hash_value = atl1c_hash_mc_addr(hw, ha->addr);
 416		atl1c_hash_set(hw, hash_value);
 417	}
 418}
 419
 420static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 421{
 422	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
 423		/* enable VLAN tag insert/strip */
 424		*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
 425	} else {
 426		/* disable VLAN tag insert/strip */
 427		*mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
 428	}
 429}
 430
 431static void atl1c_vlan_mode(struct net_device *netdev,
 432	netdev_features_t features)
 433{
 434	struct atl1c_adapter *adapter = netdev_priv(netdev);
 435	struct pci_dev *pdev = adapter->pdev;
 436	u32 mac_ctrl_data = 0;
 437
 438	if (netif_msg_pktdata(adapter))
 439		dev_dbg(&pdev->dev, "atl1c_vlan_mode\n");
 440
 441	atl1c_irq_disable(adapter);
 442	AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
 443	__atl1c_vlan_mode(features, &mac_ctrl_data);
 444	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
 445	atl1c_irq_enable(adapter);
 446}
 447
 448static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
 449{
 450	struct pci_dev *pdev = adapter->pdev;
 451
 452	if (netif_msg_pktdata(adapter))
 453		dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
 454	atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
 455}
 456
 457/**
 458 * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
 459 * @netdev: network interface device structure
 460 * @p: pointer to an address structure
 461 *
 462 * Returns 0 on success, negative on failure
 463 */
 464static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
 465{
 466	struct atl1c_adapter *adapter = netdev_priv(netdev);
 467	struct sockaddr *addr = p;
 468
 469	if (!is_valid_ether_addr(addr->sa_data))
 470		return -EADDRNOTAVAIL;
 471
 472	if (netif_running(netdev))
 473		return -EBUSY;
 474
 475	eth_hw_addr_set(netdev, addr->sa_data);
 476	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
 477
 478	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
 479
 480	return 0;
 481}
 482
 483static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
 484				struct net_device *dev)
 485{
 486	int mtu = dev->mtu;
 487
 488	adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
 489		roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
 490}
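/*
 * Worked example (a sketch, using the standard kernel values ETH_HLEN = 14,
 * ETH_FCS_LEN = 4, VLAN_HLEN = 4): a jumbo MTU of 6122 gives
 * roundup(6122 + 14 + 4 + 4, 8) = roundup(6144, 8) = 6144 bytes, while any
 * MTU at or below AT_RX_BUF_SIZE keeps the default AT_RX_BUF_SIZE.
 */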
 491
 492static netdev_features_t atl1c_fix_features(struct net_device *netdev,
 493	netdev_features_t features)
 494{
 495	struct atl1c_adapter *adapter = netdev_priv(netdev);
 496	struct atl1c_hw *hw = &adapter->hw;
 497
 498	/*
 499	 * Since there is no support for separate rx/tx vlan accel
 500	 * enable/disable make sure tx flag is always in same state as rx.
 501	 */
 502	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 503		features |= NETIF_F_HW_VLAN_CTAG_TX;
 504	else
 505		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 506
 507	if (hw->nic_type != athr_mt) {
 508		if (netdev->mtu > MAX_TSO_FRAME_SIZE)
 509			features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
 510	}
 511
 512	return features;
 513}
 514
 515static int atl1c_set_features(struct net_device *netdev,
 516	netdev_features_t features)
 517{
 518	netdev_features_t changed = netdev->features ^ features;
 519
 520	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 521		atl1c_vlan_mode(netdev, features);
 522
 523	return 0;
 524}
 525
 526static void atl1c_set_max_mtu(struct net_device *netdev)
 527{
 528	struct atl1c_adapter *adapter = netdev_priv(netdev);
 529	struct atl1c_hw *hw = &adapter->hw;
 530
 531	switch (hw->nic_type) {
 532	/* These (GbE) devices support jumbo packets, max_mtu 6122 */
 533	case athr_l1c:
 534	case athr_l1d:
 535	case athr_l1d_2:
 536		netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
 537			(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
 538		break;
 539	case athr_mt:
 540		netdev->max_mtu = 9500;
 541		break;
  542	/* The 10/100 devices don't support jumbo packets, max_mtu 1500 */
 543	default:
 544		netdev->max_mtu = ETH_DATA_LEN;
 545		break;
 546	}
 547}
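/*
 * The 6122 figure in the comment above follows from this arithmetic,
 * assuming MAX_JUMBO_FRAME_SIZE is 6144 (6 KiB) as defined in atl1c.h:
 * 6144 - (ETH_HLEN 14 + ETH_FCS_LEN 4 + VLAN_HLEN 4) = 6122.
 */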
 548
 549/**
 550 * atl1c_change_mtu - Change the Maximum Transfer Unit
 551 * @netdev: network interface device structure
 552 * @new_mtu: new value for maximum frame size
 553 *
 554 * Returns 0 on success, negative on failure
 555 */
 556static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
 557{
 558	struct atl1c_adapter *adapter = netdev_priv(netdev);
 559
 560	/* set MTU */
 561	if (netif_running(netdev)) {
 562		while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
 563			msleep(1);
 564		netdev->mtu = new_mtu;
 565		adapter->hw.max_frame_size = new_mtu;
 566		atl1c_set_rxbufsize(adapter, netdev);
 567		atl1c_down(adapter);
 568		netdev_update_features(netdev);
 569		atl1c_up(adapter);
 570		clear_bit(__AT_RESETTING, &adapter->flags);
 571	}
 572	return 0;
 573}
 574
 575/*
 576 *  caller should hold mdio_lock
 577 */
 578static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
 579{
 580	struct atl1c_adapter *adapter = netdev_priv(netdev);
 581	u16 result;
 582
 583	atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
 584	return result;
 585}
 586
 587static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
 588			     int reg_num, int val)
 589{
 590	struct atl1c_adapter *adapter = netdev_priv(netdev);
 591
 592	atl1c_write_phy_reg(&adapter->hw, reg_num, val);
 593}
 594
 595static int atl1c_mii_ioctl(struct net_device *netdev,
 596			   struct ifreq *ifr, int cmd)
 597{
 598	struct atl1c_adapter *adapter = netdev_priv(netdev);
 599	struct pci_dev *pdev = adapter->pdev;
 600	struct mii_ioctl_data *data = if_mii(ifr);
 601	unsigned long flags;
 602	int retval = 0;
 603
 604	if (!netif_running(netdev))
 605		return -EINVAL;
 606
 607	spin_lock_irqsave(&adapter->mdio_lock, flags);
 608	switch (cmd) {
 609	case SIOCGMIIPHY:
 610		data->phy_id = 0;
 611		break;
 612
 613	case SIOCGMIIREG:
 614		if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
 615				    &data->val_out)) {
 616			retval = -EIO;
 617			goto out;
 618		}
 619		break;
 620
 621	case SIOCSMIIREG:
 622		if (data->reg_num & ~(0x1F)) {
 623			retval = -EFAULT;
 624			goto out;
 625		}
 626
 627		dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x",
 628				data->reg_num, data->val_in);
 629		if (atl1c_write_phy_reg(&adapter->hw,
 630				     data->reg_num, data->val_in)) {
 631			retval = -EIO;
 632			goto out;
 633		}
 634		break;
 635
 636	default:
 637		retval = -EOPNOTSUPP;
 638		break;
 639	}
 640out:
 641	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 642	return retval;
 643}
 644
 645static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 646{
 647	switch (cmd) {
 648	case SIOCGMIIPHY:
 649	case SIOCGMIIREG:
 650	case SIOCSMIIREG:
 651		return atl1c_mii_ioctl(netdev, ifr, cmd);
 652	default:
 653		return -EOPNOTSUPP;
 654	}
 655}
 656
 657/**
 658 * atl1c_alloc_queues - Allocate memory for all rings
 659 * @adapter: board private structure to initialize
 660 *
 661 */
 662static int atl1c_alloc_queues(struct atl1c_adapter *adapter)
 663{
 664	return 0;
 665}
 666
 667static enum atl1c_nic_type atl1c_get_mac_type(struct pci_dev *pdev,
 668					      u8 __iomem *hw_addr)
 669{
 670	switch (pdev->device) {
 671	case PCI_DEVICE_ID_ATTANSIC_L2C:
 672		return athr_l2c;
 673	case PCI_DEVICE_ID_ATTANSIC_L1C:
 674		return athr_l1c;
 675	case PCI_DEVICE_ID_ATHEROS_L2C_B:
 676		return athr_l2c_b;
 677	case PCI_DEVICE_ID_ATHEROS_L2C_B2:
 678		return athr_l2c_b2;
 679	case PCI_DEVICE_ID_ATHEROS_L1D:
 680		return athr_l1d;
 681	case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
 682		if (readl(hw_addr + REG_MT_MAGIC) == MT_MAGIC)
 683			return athr_mt;
 684		return athr_l1d_2;
 685	default:
 686		return athr_l1c;
 687	}
 688}
 689
 690static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
 691{
 692	u32 link_ctrl_data;
 693
 694	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
 695
 696	hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE  |
 697			 ATL1C_TXQ_MODE_ENHANCE;
 698	hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
 699			  ATL1C_ASPM_L1_SUPPORT;
 700	hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
 701
 702	if (hw->nic_type == athr_l1c ||
 703	    hw->nic_type == athr_l1d ||
 704	    hw->nic_type == athr_l1d_2)
 705		hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
 706	return 0;
 707}
 708
 709struct atl1c_platform_patch {
 710	u16 pci_did;
 711	u8  pci_revid;
 712	u16 subsystem_vid;
 713	u16 subsystem_did;
 714	u32 patch_flag;
 715#define ATL1C_LINK_PATCH	0x1
 716};
 717static const struct atl1c_platform_patch plats[] = {
 718{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
 719{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
 720{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
 721{0x2062, 0xC0, 0x1019, 0x8152, 0x1},
 722{0x2062, 0xC0, 0x1019, 0x2062, 0x1},
 723{0x2062, 0xC0, 0x1458, 0xE000, 0x1},
 724{0x2062, 0xC1, 0x1019, 0x8152, 0x1},
 725{0x2062, 0xC1, 0x1019, 0x2062, 0x1},
 726{0x2062, 0xC1, 0x1458, 0xE000, 0x1},
 727{0x2062, 0xC1, 0x1565, 0x2802, 0x1},
 728{0x2062, 0xC1, 0x1565, 0x2801, 0x1},
 729{0x1073, 0xC0, 0x1019, 0x8151, 0x1},
 730{0x1073, 0xC0, 0x1019, 0x1073, 0x1},
 731{0x1073, 0xC0, 0x1458, 0xE000, 0x1},
 732{0x1083, 0xC0, 0x1458, 0xE000, 0x1},
 733{0x1083, 0xC0, 0x1019, 0x8151, 0x1},
 734{0x1083, 0xC0, 0x1019, 0x1083, 0x1},
 735{0x1083, 0xC0, 0x1462, 0x7680, 0x1},
 736{0x1083, 0xC0, 0x1565, 0x2803, 0x1},
 737{0},
 738};
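/*
 * Quirk table: each row is a {device id, revision, subsystem vendor,
 * subsystem device} tuple identifying a board that needs ATL1C_LINK_PATCH;
 * atl1c_patch_assign() below walks the table and sets hw->msi_lnkpatch on
 * a match.
 */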
 739
 740static void atl1c_patch_assign(struct atl1c_hw *hw)
 741{
 742	struct pci_dev	*pdev = hw->adapter->pdev;
 743	u32 misc_ctrl;
 744	int i = 0;
 745
 746	hw->msi_lnkpatch = false;
 747
 748	while (plats[i].pci_did != 0) {
 749		if (plats[i].pci_did == hw->device_id &&
 750		    plats[i].pci_revid == hw->revision_id &&
 751		    plats[i].subsystem_vid == hw->subsystem_vendor_id &&
 752		    plats[i].subsystem_did == hw->subsystem_id) {
 753			if (plats[i].patch_flag & ATL1C_LINK_PATCH)
 754				hw->msi_lnkpatch = true;
 755		}
 756		i++;
 757	}
 758
 759	if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
 760	    hw->revision_id == L2CB_V21) {
 761		/* config access mode */
 762		pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
 763				       REG_PCIE_DEV_MISC_CTRL);
 764		pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
 765		misc_ctrl &= ~0x100;
 766		pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
 767				       REG_PCIE_DEV_MISC_CTRL);
 768		pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl);
 769	}
 770}
 771/**
 772 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
 773 * @adapter: board private structure to initialize
 774 *
 775 * atl1c_sw_init initializes the Adapter private data structure.
 776 * Fields are initialized based on PCI device information and
 777 * OS network device settings (MTU size).
 778 */
 779static int atl1c_sw_init(struct atl1c_adapter *adapter)
 780{
 781	struct atl1c_hw *hw   = &adapter->hw;
 782	struct pci_dev	*pdev = adapter->pdev;
 783	u32 revision;
 784	int i;
 785
 786	adapter->wol = 0;
 787	device_set_wakeup_enable(&pdev->dev, false);
 788	adapter->link_speed = SPEED_0;
 789	adapter->link_duplex = FULL_DUPLEX;
 790	adapter->tpd_ring[0].count = 1024;
 791	adapter->rfd_ring[0].count = 512;
 792
 793	hw->vendor_id = pdev->vendor;
 794	hw->device_id = pdev->device;
 795	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 796	hw->subsystem_id = pdev->subsystem_device;
 797	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision);
 798	hw->revision_id = revision & 0xFF;
 799	/* before link up, we assume hibernate is true */
 800	hw->hibernate = true;
 801	hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
 802	if (atl1c_setup_mac_funcs(hw) != 0) {
 803		dev_err(&pdev->dev, "set mac function pointers failed\n");
 804		return -1;
 805	}
 806	atl1c_patch_assign(hw);
 807
 808	hw->intr_mask = IMR_NORMAL_MASK;
 809	for (i = 0; i < adapter->tx_queue_count; ++i)
 810		hw->intr_mask |= atl1c_qregs[i].tx_isr;
 811	for (i = 0; i < adapter->rx_queue_count; ++i)
 812		hw->intr_mask |= atl1c_qregs[i].rx_isr;
 813	hw->phy_configured = false;
 814	hw->preamble_len = 7;
 815	hw->max_frame_size = adapter->netdev->mtu;
 816	hw->autoneg_advertised = ADVERTISED_Autoneg;
 817	hw->indirect_tab = 0xE4E4E4E4;
 818	hw->base_cpu = 0;
 819
 820	hw->ict = 50000;		/* 100ms */
 821	hw->smb_timer = 200000;	  	/* 400ms */
 822	hw->rx_imt = 200;
 823	hw->tx_imt = 1000;
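	/*
	 * The ms figures in the comments above imply a 2us hardware tick
	 * (50000 * 2us = 100ms, 200000 * 2us = 400ms), which would put
	 * rx_imt at ~400us and tx_imt at ~2ms -- an inference from the
	 * inline comments, not a documented unit.
	 */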
 824
 825	hw->tpd_burst = 5;
 826	hw->rfd_burst = 8;
 827	hw->dma_order = atl1c_dma_ord_out;
 828	hw->dmar_block = atl1c_dma_req_1024;
 829
 830	if (atl1c_alloc_queues(adapter)) {
 831		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 832		return -ENOMEM;
 833	}
 834	/* TODO */
 835	atl1c_set_rxbufsize(adapter, adapter->netdev);
 836	atomic_set(&adapter->irq_sem, 1);
 837	spin_lock_init(&adapter->mdio_lock);
 838	spin_lock_init(&adapter->hw.intr_mask_lock);
 839	set_bit(__AT_DOWN, &adapter->flags);
 840
 841	return 0;
 842}
 843
 844static inline void atl1c_clean_buffer(struct pci_dev *pdev,
 845				      struct atl1c_buffer *buffer_info,
 846				      int budget)
 847{
  848	u16 pci_direction;
  849	if (buffer_info->flags & ATL1C_BUFFER_FREE)
  850		return;
  851	if (buffer_info->dma) {
  852		if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
  853			pci_direction = DMA_FROM_DEVICE;
  854		else
  855			pci_direction = DMA_TO_DEVICE;
  856
  857		if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
  858			dma_unmap_single(&pdev->dev, buffer_info->dma,
  859					 buffer_info->length, pci_direction);
  860		else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
  861			dma_unmap_page(&pdev->dev, buffer_info->dma,
  862				       buffer_info->length, pci_direction);
 863	}
 864	if (buffer_info->skb)
 865		napi_consume_skb(buffer_info->skb, budget);
 866	buffer_info->dma = 0;
 867	buffer_info->skb = NULL;
 868	ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
 869}
 870/**
 871 * atl1c_clean_tx_ring - Free Tx-skb
 872 * @adapter: board private structure
 873 * @queue: idx of transmit queue
 874 */
 875static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
 876				u32 queue)
 877{
 878	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
 879	struct atl1c_buffer *buffer_info;
 880	struct pci_dev *pdev = adapter->pdev;
 881	u16 index, ring_count;
 882
 883	ring_count = tpd_ring->count;
 884	for (index = 0; index < ring_count; index++) {
 885		buffer_info = &tpd_ring->buffer_info[index];
 886		atl1c_clean_buffer(pdev, buffer_info, 0);
 887	}
 888
 889	netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
 890
 891	/* Zero out Tx-buffers */
 892	memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
 893		ring_count);
 894	atomic_set(&tpd_ring->next_to_clean, 0);
 895	tpd_ring->next_to_use = 0;
 896}
 897
 898/**
 899 * atl1c_clean_rx_ring - Free rx-reservation skbs
 900 * @adapter: board private structure
 901 * @queue: idx of transmit queue
 902 */
 903static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
 904{
 905	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
 906	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
 907	struct atl1c_buffer *buffer_info;
 908	struct pci_dev *pdev = adapter->pdev;
 909	int j;
 910
 911	for (j = 0; j < rfd_ring->count; j++) {
 912		buffer_info = &rfd_ring->buffer_info[j];
 913		atl1c_clean_buffer(pdev, buffer_info, 0);
 914	}
 915	/* zero out the descriptor ring */
 916	memset(rfd_ring->desc, 0, rfd_ring->size);
 917	rfd_ring->next_to_clean = 0;
 918	rfd_ring->next_to_use = 0;
 919	rrd_ring->next_to_use = 0;
 920	rrd_ring->next_to_clean = 0;
 921}
 922
 923/*
 924 * Read / Write Ptr Initialize:
 925 */
 926static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
 927{
 928	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
 929	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
 930	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
 931	struct atl1c_buffer *buffer_info;
 932	int i, j;
 933
 934	for (i = 0; i < adapter->tx_queue_count; i++) {
 935		tpd_ring[i].next_to_use = 0;
 936		atomic_set(&tpd_ring[i].next_to_clean, 0);
 937		buffer_info = tpd_ring[i].buffer_info;
 938		for (j = 0; j < tpd_ring->count; j++)
  939			ATL1C_SET_BUFFER_STATE(&buffer_info[j],
 940					       ATL1C_BUFFER_FREE);
 941	}
 942	for (i = 0; i < adapter->rx_queue_count; i++) {
 943		rfd_ring[i].next_to_use = 0;
 944		rfd_ring[i].next_to_clean = 0;
 945		rrd_ring[i].next_to_use = 0;
 946		rrd_ring[i].next_to_clean = 0;
 947		for (j = 0; j < rfd_ring[i].count; j++) {
 948			buffer_info = &rfd_ring[i].buffer_info[j];
 949			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
 950		}
 951	}
 952}
 953
 954/**
 955 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
 956 * @adapter: board private structure
 957 *
 958 * Free all transmit software resources
 959 */
 960static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
 961{
 962	struct pci_dev *pdev = adapter->pdev;
 963
 964	dma_free_coherent(&pdev->dev, adapter->ring_header.size,
 965			  adapter->ring_header.desc, adapter->ring_header.dma);
 966	adapter->ring_header.desc = NULL;
 967
  968	/* Note: just free tpd_ring.buffer_info;
  969	 * it contains rfd_ring.buffer_info, do not double free
 970	 */
 971	if (adapter->tpd_ring[0].buffer_info) {
 972		kfree(adapter->tpd_ring[0].buffer_info);
 973		adapter->tpd_ring[0].buffer_info = NULL;
 974	}
 975}
 976
 977/**
 978 * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources
 979 * @adapter: board private structure
 980 *
 981 * Return 0 on success, negative on failure
 982 */
 983static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
 984{
 985	struct pci_dev *pdev = adapter->pdev;
 986	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
 987	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
 988	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
 989	struct atl1c_ring_header *ring_header = &adapter->ring_header;
 990	int tqc = adapter->tx_queue_count;
 991	int rqc = adapter->rx_queue_count;
 992	int size;
 993	int i;
 994	int count = 0;
 995	u32 offset = 0;
 996
 997	/* Even though only one tpd queue is actually used, the "high"
 998	 * priority tpd queue also gets initialized
 999	 */
1000	if (tqc == 1)
1001		tqc = 2;
1002
1003	for (i = 1; i < tqc; i++)
1004		tpd_ring[i].count = tpd_ring[0].count;
1005
1006	size = sizeof(struct atl1c_buffer) * (tpd_ring->count * tqc +
1007					      rfd_ring->count * rqc);
1008	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
1009	if (unlikely(!tpd_ring->buffer_info))
1010		goto err_nomem;
1011
1012	for (i = 0; i < tqc; i++) {
1013		tpd_ring[i].adapter = adapter;
1014		tpd_ring[i].num = i;
1015		tpd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
1016		count += tpd_ring[i].count;
1017	}
1018
1019	for (i = 0; i < rqc; i++) {
1020		rrd_ring[i].adapter = adapter;
1021		rrd_ring[i].num = i;
1022		rrd_ring[i].count = rfd_ring[0].count;
1023		rfd_ring[i].count = rfd_ring[0].count;
1024		rfd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
1025		count += rfd_ring->count;
1026	}
1027
1028	/*
1029	 * real ring DMA buffer
1030	 * each ring/block may need up to 8 bytes for alignment, hence the
1031	 * additional bytes tacked onto the end.
1032	 */
1033	ring_header->size =
1034		sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
1035		sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
1036		sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
1037		8 * 4;
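	/*
	 * Rough size check (a sketch, assuming the descriptor sizes from
	 * atl1c.h: 16-byte TPD, 8-byte RFD, 16-byte RRD): with the default
	 * 1024 TPDs (tqc promoted to 2) and 512 RFDs on a single RX queue,
	 * this is 16*1024*2 + 8*512 + 16*512 + 32 = 45088 bytes.
	 */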
1038
1039	ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
1040					       &ring_header->dma, GFP_KERNEL);
1041	if (unlikely(!ring_header->desc)) {
1042		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1043		goto err_nomem;
1044	}
1045	/* init TPD ring */
1046
1047	tpd_ring[0].dma = roundup(ring_header->dma, 8);
1048	offset = tpd_ring[0].dma - ring_header->dma;
1049	for (i = 0; i < tqc; i++) {
1050		tpd_ring[i].dma = ring_header->dma + offset;
1051		tpd_ring[i].desc = (u8 *)ring_header->desc + offset;
1052		tpd_ring[i].size =
1053			sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
1054		offset += roundup(tpd_ring[i].size, 8);
1055	}
1056	for (i = 0; i < rqc; i++) {
1057		/* init RFD ring */
1058		rfd_ring[i].dma = ring_header->dma + offset;
1059		rfd_ring[i].desc = (u8 *)ring_header->desc + offset;
1060		rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
1061			rfd_ring[i].count;
1062		offset += roundup(rfd_ring[i].size, 8);
1063
1064		/* init RRD ring */
1065		rrd_ring[i].dma = ring_header->dma + offset;
1066		rrd_ring[i].desc = (u8 *)ring_header->desc + offset;
1067		rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
1068			rrd_ring[i].count;
1069		offset += roundup(rrd_ring[i].size, 8);
1070	}
1071
1072	return 0;
1073
1074err_nomem:
1075	kfree(tpd_ring->buffer_info);
1076	return -ENOMEM;
1077}
1078
1079static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1080{
1081	struct atl1c_hw *hw = &adapter->hw;
1082	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
1083	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
1084	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
1085	int i;
1086	int tx_queue_count = adapter->tx_queue_count;
1087
1088	if (tx_queue_count == 1)
1089		tx_queue_count = 2;
1090
1091	/* TPD */
1092	AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
1093		     (u32)((tpd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
 1094	/* set the TPD base address for each TX queue */
1095	for (i = 0; i < tx_queue_count; i++) {
1096		AT_WRITE_REG(hw, atl1c_qregs[i].tpd_addr_lo,
1097			     (u32)(tpd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1098	}
1099	AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
1100			(u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
1101
1102
1103	/* RFD */
1104	AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
1105		     (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1106	for (i = 0; i < adapter->rx_queue_count; i++) {
1107		AT_WRITE_REG(hw, atl1c_qregs[i].rfd_addr_lo,
1108			     (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1109	}
1110
1111	AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
1112			rfd_ring->count & RFD_RING_SIZE_MASK);
1113	AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
1114			adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
1115
1116	/* RRD */
1117	for (i = 0; i < adapter->rx_queue_count; i++) {
1118		AT_WRITE_REG(hw, atl1c_qregs[i].rrd_addr_lo,
1119			     (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1120	}
1121	AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
1122			(rrd_ring->count & RRD_RING_SIZE_MASK));
1123
1124	if (hw->nic_type == athr_l2c_b) {
1125		AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1126		AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
1127		AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
1128		AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
1129		AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
1130		AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
1131		AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0);	/* TX watermark, to enter l1 state.*/
1132		AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0);		/* RXD threshold.*/
1133	}
 1134	/* Load all of the base addresses above */
1135	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
1136}
1137
1138static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1139{
1140	struct atl1c_hw *hw = &adapter->hw;
1141	int max_pay_load;
1142	u16 tx_offload_thresh;
1143	u32 txq_ctrl_data;
1144
1145	tx_offload_thresh = MAX_TSO_FRAME_SIZE;
1146	AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1147		(tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
1148	max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
1149	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
1150	/*
 1151	 * if BIOS had changed the dma-read-max-length to an invalid value,
 1152	 * restore it to the default value
1153	 */
1154	if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
1155		pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
1156		hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
1157	}
1158	txq_ctrl_data =
1159		hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
1160		L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
1161
1162	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1163}
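/*
 * Note: pcie_get_readrq() returns the max read request size in bytes, so
 * the >> 8 above maps 256B -> 1, 512B -> 2, and so on, onto the DMA block
 * enum; assuming DEVICE_CTRL_MAXRRS_MIN is 1, the fallback
 * pcie_set_readrq() call restores a 256-byte (128 << 1) read request size.
 */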
1164
1165static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1166{
1167	struct atl1c_hw *hw = &adapter->hw;
1168	u32 rxq_ctrl_data;
1169
1170	rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
1171			RXQ_RFD_BURST_NUM_SHIFT;
1172
1173	if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1174		rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1175
1176	/* aspm for gigabit */
1177	if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
1178		rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
1179			ASPM_THRUPUT_LIMIT_100M);
1180
1181	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1182}
1183
1184static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1185{
1186	struct atl1c_hw *hw = &adapter->hw;
1187	u32 dma_ctrl_data;
1188
1189	dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
1190		DMA_CTRL_RREQ_PRI_DATA |
1191		FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
1192		FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
1193		FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
1194
1195	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1196}
1197
1198/*
1199 * Stop the mac, transmit and receive units
1200 * hw - Struct containing variables accessed by shared code
1201 * return : 0  or  idle status (if error)
1202 */
1203static int atl1c_stop_mac(struct atl1c_hw *hw)
1204{
1205	u32 data;
1206
1207	AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1208	data &= ~RXQ_CTRL_EN;
1209	AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1210
1211	AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1212	data &= ~TXQ_CTRL_EN;
1213	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1214
1215	atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
1216
1217	AT_READ_REG(hw, REG_MAC_CTRL, &data);
1218	data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1219	AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1220
1221	return (int)atl1c_wait_until_idle(hw,
1222		IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
1223}
1224
1225static void atl1c_start_mac(struct atl1c_adapter *adapter)
1226{
1227	struct atl1c_hw *hw = &adapter->hw;
1228	u32 mac, txq, rxq;
1229
1230	hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX;
1231	hw->mac_speed = adapter->link_speed == SPEED_1000 ?
1232		atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
1233
1234	AT_READ_REG(hw, REG_TXQ_CTRL, &txq);
1235	AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
1236	AT_READ_REG(hw, REG_MAC_CTRL, &mac);
1237
1238	txq |= TXQ_CTRL_EN;
1239	rxq |= RXQ_CTRL_EN;
1240	mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW |
1241	       MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW |
1242	       MAC_CTRL_ADD_CRC | MAC_CTRL_PAD |
1243	       MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN |
1244	       MAC_CTRL_HASH_ALG_CRC32;
1245	if (hw->mac_duplex)
1246		mac |= MAC_CTRL_DUPLX;
1247	else
1248		mac &= ~MAC_CTRL_DUPLX;
1249	mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed);
1250	mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len);
1251
1252	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq);
1253	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
1254	AT_WRITE_REG(hw, REG_MAC_CTRL, mac);
1255}
1256
1257/*
1258 * Reset the transmit and receive units; mask and clear all interrupts.
1259 * hw - Struct containing variables accessed by shared code
1260 * return : 0  or  idle status (if error)
1261 */
1262static int atl1c_reset_mac(struct atl1c_hw *hw)
1263{
1264	struct atl1c_adapter *adapter = hw->adapter;
1265	struct pci_dev *pdev = adapter->pdev;
1266	u32 ctrl_data = 0;
1267
1268	atl1c_stop_mac(hw);
1269	/*
1270	 * Issue Soft Reset to the MAC.  This will reset the chip's
 1271	 * transmit, receive, and DMA units.  It will not affect
1272	 * the current PCI configuration.  The global reset bit is self-
1273	 * clearing, and should clear within a microsecond.
1274	 */
1275	AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data);
1276	ctrl_data |= MASTER_CTRL_OOB_DIS;
1277	AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST);
1278
1279	AT_WRITE_FLUSH(hw);
1280	msleep(10);
 1281	/* Wait at least 10ms for all modules to be idle */
1282
1283	if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
1284		dev_err(&pdev->dev,
 1285			"MAC state machine didn't become idle within"
 1286			" 10ms of being disabled\n");
1287		return -1;
1288	}
1289	AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data);
1290
1291	/* driver control speed/duplex */
1292	AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data);
1293	AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW);
1294
1295	/* clk switch setting */
1296	AT_READ_REG(hw, REG_SERDES, &ctrl_data);
1297	switch (hw->nic_type) {
1298	case athr_l2c_b:
1299		ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN |
1300				SERDES_MAC_CLK_SLOWDOWN);
1301		AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1302		break;
1303	case athr_l2c_b2:
1304	case athr_l1d_2:
1305		ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN;
1306		AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1307		break;
1308	default:
1309		break;
1310	}
1311
1312	return 0;
1313}
1314
1315static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1316{
1317	u16 ctrl_flags = hw->ctrl_flags;
1318
1319	hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
1320	atl1c_set_aspm(hw, SPEED_0);
1321	hw->ctrl_flags = ctrl_flags;
1322}
1323
1324/*
1325 * Set ASPM state.
1326 * Enable/disable L0s/L1 depend on link state.
1327 */
1328static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1329{
1330	u32 pm_ctrl_data;
1331	u32 link_l1_timer;
1332
1333	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1334	pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
1335			  PM_CTRL_ASPM_L0S_EN |
1336			  PM_CTRL_MAC_ASPM_CHK);
1337	/* L1 timer */
1338	if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1339		pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
1340		link_l1_timer =
1341			link_speed == SPEED_1000 || link_speed == SPEED_100 ?
1342			L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
1343		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1344			L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
1345	} else {
1346		link_l1_timer = hw->nic_type == athr_l2c_b ?
1347			L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
1348		if (link_speed != SPEED_1000 && link_speed != SPEED_100)
1349			link_l1_timer = 1;
1350		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1351			PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
1352	}
1353
1354	/* L0S/L1 enable */
1355	if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0)
1356		pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
1357	if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1358		pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
1359
1360	/* l2cb & l1d & l2cb2 & l1d2 */
1361	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1362	    hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1363		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1364			PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
1365		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
1366				PM_CTRL_SERDES_PD_EX_L1 |
1367				PM_CTRL_CLK_SWH_L1;
1368		pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1369				  PM_CTRL_SERDES_PLL_L1_EN |
1370				  PM_CTRL_SERDES_BUFS_RX_L1_EN |
1371				  PM_CTRL_SA_DLY_EN |
1372				  PM_CTRL_HOTRST);
1373		/* disable l0s if link down or l2cb */
1374		if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
1375			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1376	} else { /* l1c */
1377		pm_ctrl_data =
1378			FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
1379		if (link_speed != SPEED_0) {
1380			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
1381					PM_CTRL_SERDES_PLL_L1_EN |
1382					PM_CTRL_SERDES_BUFS_RX_L1_EN;
1383			pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
1384					  PM_CTRL_CLK_SWH_L1 |
1385					  PM_CTRL_ASPM_L0S_EN |
1386					  PM_CTRL_ASPM_L1_EN);
1387		} else { /* link down */
1388			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1389			pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1390					  PM_CTRL_SERDES_PLL_L1_EN |
1391					  PM_CTRL_SERDES_BUFS_RX_L1_EN |
1392					  PM_CTRL_ASPM_L0S_EN);
1393		}
1394	}
1395	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1396
1397	return;
1398}
1399
1400/**
 1401 * atl1c_configure_mac - Configure Transmit & Receive Unit after Reset
 1402 * @adapter: board private structure
 1403 *
 1404 * Configure the Tx/Rx unit of the MAC after a reset.
1405 */
1406static int atl1c_configure_mac(struct atl1c_adapter *adapter)
1407{
1408	struct atl1c_hw *hw = &adapter->hw;
1409	u32 master_ctrl_data = 0;
1410	u32 intr_modrt_data;
1411	u32 data;
1412
1413	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1414	master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
1415			      MASTER_CTRL_RX_ITIMER_EN |
1416			      MASTER_CTRL_INT_RDCLR);
1417	/* clear interrupt status */
1418	AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1419	/*  Clear any WOL status */
1420	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
 1421	/* set Interrupt Clear Timer:
 1422	 * HW will re-assert the interrupt to the system after waiting
 1423	 * the configured time for software to acknowledge the interrupt.
 1424	 */
1425
1426	data = CLK_GATING_EN_ALL;
1427	if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
1428		if (hw->nic_type == athr_l2c_b)
1429			data &= ~CLK_GATING_RXMAC_EN;
1430	} else
1431		data = 0;
1432	AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
1433
1434	AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
1435		hw->ict & INT_RETRIG_TIMER_MASK);
1436
1437	atl1c_configure_des_ring(adapter);
1438
1439	if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) {
1440		intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) <<
1441					IRQ_MODRT_TX_TIMER_SHIFT;
1442		intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) <<
1443					IRQ_MODRT_RX_TIMER_SHIFT;
1444		AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1445		master_ctrl_data |=
1446			MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN;
1447	}
1448
1449	if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
1450		master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
1451
1452	master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1453	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1454
1455	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1456		hw->smb_timer & SMB_STAT_TIMER_MASK);
1457
1458	/* set MTU */
1459	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1460			VLAN_HLEN + ETH_FCS_LEN);
1461
1462	atl1c_configure_tx(adapter);
1463	atl1c_configure_rx(adapter);
1464	atl1c_configure_dma(adapter);
1465
1466	return 0;
1467}
1468
1469static int atl1c_configure(struct atl1c_adapter *adapter)
1470{
1471	struct net_device *netdev = adapter->netdev;
1472	int num;
1473	int i;
1474
1475	if (adapter->hw.nic_type == athr_mt) {
1476		u32 mode;
1477
1478		AT_READ_REG(&adapter->hw, REG_MT_MODE, &mode);
1479		if (adapter->rx_queue_count == 4)
1480			mode |= MT_MODE_4Q;
1481		else
1482			mode &= ~MT_MODE_4Q;
1483		AT_WRITE_REG(&adapter->hw, REG_MT_MODE, mode);
1484	}
1485
1486	atl1c_init_ring_ptrs(adapter);
1487	atl1c_set_multi(netdev);
1488	atl1c_restore_vlan(adapter);
1489
1490	for (i = 0; i < adapter->rx_queue_count; ++i) {
1491		num = atl1c_alloc_rx_buffer(adapter, i, false);
1492		if (unlikely(num == 0))
1493			return -ENOMEM;
1494	}
1495
1496	if (atl1c_configure_mac(adapter))
1497		return -EIO;
1498
1499	return 0;
1500}
1501
1502static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1503{
1504	u16 hw_reg_addr = 0;
1505	unsigned long *stats_item = NULL;
1506	u32 data;
1507
1508	/* update rx status */
1509	hw_reg_addr = REG_MAC_RX_STATUS_BIN;
1510	stats_item  = &adapter->hw_stats.rx_ok;
1511	while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
1512		AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1513		*stats_item += data;
1514		stats_item++;
1515		hw_reg_addr += 4;
1516	}
 1517	/* update tx status */
1518	hw_reg_addr = REG_MAC_TX_STATUS_BIN;
1519	stats_item  = &adapter->hw_stats.tx_ok;
1520	while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
1521		AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1522		*stats_item += data;
1523		stats_item++;
1524		hw_reg_addr += 4;
1525	}
1526}
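/*
 * Note: the two loops above rely on the fields of struct atl1c_hw_stats
 * (from rx_ok and tx_ok onward) being declared in exactly the same order
 * as the hardware's consecutive 4-byte counter registers; reordering that
 * struct would silently misattribute counters.
 */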
1527
1528/**
1529 * atl1c_get_stats - Get System Network Statistics
1530 * @netdev: network interface device structure
1531 *
1532 * Returns the address of the device statistics structure.
1533 * The statistics are actually updated from the timer callback.
1534 */
1535static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
1536{
1537	struct atl1c_adapter *adapter = netdev_priv(netdev);
1538	struct atl1c_hw_stats  *hw_stats = &adapter->hw_stats;
1539	struct net_device_stats *net_stats = &netdev->stats;
1540
1541	atl1c_update_hw_stats(adapter);
1542	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
1543	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
1544	net_stats->multicast  = hw_stats->rx_mcast;
1545	net_stats->collisions = hw_stats->tx_1_col +
1546				hw_stats->tx_2_col +
1547				hw_stats->tx_late_col +
1548				hw_stats->tx_abort_col;
1549
1550	net_stats->rx_errors  = hw_stats->rx_frag +
1551				hw_stats->rx_fcs_err +
1552				hw_stats->rx_len_err +
1553				hw_stats->rx_sz_ov +
1554				hw_stats->rx_rrd_ov +
1555				hw_stats->rx_align_err +
1556				hw_stats->rx_rxf_ov;
1557
1558	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
1559	net_stats->rx_length_errors = hw_stats->rx_len_err;
1560	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
1561	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
1562	net_stats->rx_dropped       = hw_stats->rx_rrd_ov;
1563
1564	net_stats->tx_errors = hw_stats->tx_late_col +
1565			       hw_stats->tx_abort_col +
1566			       hw_stats->tx_underrun +
1567			       hw_stats->tx_trunc;
1568
1569	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
1570	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
1571	net_stats->tx_window_errors  = hw_stats->tx_late_col;
1572
1573	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
1574	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
1575
1576	return net_stats;
1577}
1578
1579static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
1580{
1581	u16 phy_data;
1582
1583	spin_lock(&adapter->mdio_lock);
1584	atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data);
1585	spin_unlock(&adapter->mdio_lock);
1586}
1587
1588static int atl1c_clean_tx(struct napi_struct *napi, int budget)
1589{
1590	struct atl1c_tpd_ring *tpd_ring =
1591		container_of(napi, struct atl1c_tpd_ring, napi);
1592	struct atl1c_adapter *adapter = tpd_ring->adapter;
1593	struct netdev_queue *txq =
1594		netdev_get_tx_queue(napi->dev, tpd_ring->num);
1595	struct atl1c_buffer *buffer_info;
1596	struct pci_dev *pdev = adapter->pdev;
1597	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1598	u16 hw_next_to_clean;
1599	unsigned int total_bytes = 0, total_packets = 0;
1600	unsigned long flags;
1601
1602	AT_READ_REGW(&adapter->hw, atl1c_qregs[tpd_ring->num].tpd_cons,
1603		     &hw_next_to_clean);
1604
1605	while (next_to_clean != hw_next_to_clean) {
1606		buffer_info = &tpd_ring->buffer_info[next_to_clean];
1607		if (buffer_info->skb) {
1608			total_bytes += buffer_info->skb->len;
1609			total_packets++;
1610		}
1611		atl1c_clean_buffer(pdev, buffer_info, budget);
1612		if (++next_to_clean == tpd_ring->count)
1613			next_to_clean = 0;
1614		atomic_set(&tpd_ring->next_to_clean, next_to_clean);
1615	}
1616
1617	netdev_tx_completed_queue(txq, total_packets, total_bytes);
1618
1619	if (netif_tx_queue_stopped(txq) && netif_carrier_ok(adapter->netdev))
1620		netif_tx_wake_queue(txq);
1621
1622	if (total_packets < budget) {
1623		napi_complete_done(napi, total_packets);
1624		spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
1625		adapter->hw.intr_mask |= atl1c_qregs[tpd_ring->num].tx_isr;
1626		AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
1627		spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
1628		return total_packets;
1629	}
1630	return budget;
1631}
1632
1633static void atl1c_intr_rx_tx(struct atl1c_adapter *adapter, u32 status)
1634{
1635	struct atl1c_hw *hw = &adapter->hw;
1636	u32 intr_mask;
1637	int i;
1638
1639	spin_lock(&hw->intr_mask_lock);
1640	intr_mask = hw->intr_mask;
1641	for (i = 0; i < adapter->rx_queue_count; ++i) {
1642		if (!(status & atl1c_qregs[i].rx_isr))
1643			continue;
1644		if (napi_schedule_prep(&adapter->rrd_ring[i].napi)) {
1645			intr_mask &= ~atl1c_qregs[i].rx_isr;
1646			__napi_schedule(&adapter->rrd_ring[i].napi);
1647		}
1648	}
1649	for (i = 0; i < adapter->tx_queue_count; ++i) {
1650		if (!(status & atl1c_qregs[i].tx_isr))
1651			continue;
1652		if (napi_schedule_prep(&adapter->tpd_ring[i].napi)) {
1653			intr_mask &= ~atl1c_qregs[i].tx_isr;
1654			__napi_schedule(&adapter->tpd_ring[i].napi);
1655		}
1656	}
1657
1658	if (hw->intr_mask != intr_mask) {
1659		hw->intr_mask = intr_mask;
1660		AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1661	}
1662	spin_unlock(&hw->intr_mask_lock);
1663}
1664
1665/**
1666 * atl1c_intr - Interrupt Handler
1667 * @irq: interrupt number
1668 * @data: pointer to a network interface device structure
1669 */
1670static irqreturn_t atl1c_intr(int irq, void *data)
1671{
1672	struct net_device *netdev  = data;
1673	struct atl1c_adapter *adapter = netdev_priv(netdev);
1674	struct pci_dev *pdev = adapter->pdev;
1675	struct atl1c_hw *hw = &adapter->hw;
1676	int max_ints = AT_MAX_INT_WORK;
1677	int handled = IRQ_NONE;
1678	u32 status;
1679	u32 reg_data;
1680
1681	do {
1682		AT_READ_REG(hw, REG_ISR, &reg_data);
1683		status = reg_data & hw->intr_mask;
1684
1685		if (status == 0 || (status & ISR_DIS_INT) != 0) {
1686			if (max_ints != AT_MAX_INT_WORK)
1687				handled = IRQ_HANDLED;
1688			break;
1689		}
1690		/* link event */
1691		if (status & ISR_GPHY)
1692			atl1c_clear_phy_int(adapter);
1693		/* Ack ISR */
1694		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
1695		if (status & (ISR_RX_PKT | ISR_TX_PKT))
1696			atl1c_intr_rx_tx(adapter, status);
1697
1698		handled = IRQ_HANDLED;
1699		/* check if PCIE PHY Link down */
1700		if (status & ISR_ERROR) {
1701			if (netif_msg_hw(adapter))
1702				dev_err(&pdev->dev,
1703					"atl1c hardware error (status = 0x%x)\n",
1704					status & ISR_ERROR);
1705			/* reset MAC */
1706			set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
1707			schedule_work(&adapter->common_task);
1708			return IRQ_HANDLED;
1709		}
1710
1711		if (status & ISR_OVER)
1712			if (netif_msg_intr(adapter))
1713				dev_warn(&pdev->dev,
1714					"TX/RX overflow (status = 0x%x)\n",
1715					status & ISR_OVER);
1716
1717		/* link event */
1718		if (status & (ISR_GPHY | ISR_MANUAL)) {
1719			netdev->stats.tx_carrier_errors++;
1720			atl1c_link_chg_event(adapter);
1721			break;
1722		}
1723
1724	} while (--max_ints > 0);
1725	/* re-enable Interrupt*/
1726	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
1727	return handled;
1728}
1729
1730static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1731		  struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
1732{
1733	if (adapter->hw.nic_type == athr_mt) {
1734		if (prrs->word3 & RRS_MT_PROT_ID_TCPUDP)
1735			skb->ip_summed = CHECKSUM_UNNECESSARY;
1736		return;
1737	}
1738	/*
 1739	 * The pid field in RRS is not always correct, so we
 1740	 * cannot figure out if the packet is fragmented or not,
 1741	 * and instead report CHECKSUM_NONE to the kernel
1742	 */
1743	skb_checksum_none_assert(skb);
1744}
1745
1746static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
1747				 bool napi_mode)
1748{
1749	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
1750	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
1751	struct pci_dev *pdev = adapter->pdev;
1752	struct atl1c_buffer *buffer_info, *next_info;
1753	struct sk_buff *skb;
1754	void *vir_addr = NULL;
1755	u16 num_alloc = 0;
1756	u16 rfd_next_to_use, next_next;
1757	struct atl1c_rx_free_desc *rfd_desc;
1758	dma_addr_t mapping;
1759
1760	next_next = rfd_next_to_use = rfd_ring->next_to_use;
1761	if (++next_next == rfd_ring->count)
1762		next_next = 0;
1763	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1764	next_info = &rfd_ring->buffer_info[next_next];
1765
1766	while (next_info->flags & ATL1C_BUFFER_FREE) {
1767		rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1768
1769		/* When the DMA RX address is set to something like
1770		 * 0x....fc0, it is very likely to cause a DMA
1771		 * RFD overflow issue.
1772		 *
1773		 * To work around it, allocate the rx skb with 64
1774		 * bytes of extra space, and offset the address
1775		 * whenever 0x....fc0 is detected.
1776		 */
1777		if (likely(napi_mode))
1778			skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
1779		else
1780			skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
1781		if (unlikely(!skb)) {
1782			if (netif_msg_rx_err(adapter))
1783				dev_warn(&pdev->dev, "alloc rx buffer failed\n");
1784			break;
1785		}
1786
1787		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
1788			skb_reserve(skb, 64);
1789
1790		/*
1791		 * Make buffer alignment 2 beyond a 16 byte boundary
1792		 * this will result in a 16 byte aligned IP header after
1793		 * the 14 byte MAC header is removed
1794		 */
1795		vir_addr = skb->data;
1796		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1797		buffer_info->skb = skb;
1798		buffer_info->length = adapter->rx_buffer_len;
1799		mapping = dma_map_single(&pdev->dev, vir_addr,
1800					 buffer_info->length, DMA_FROM_DEVICE);
1801		if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
1802			dev_kfree_skb(skb);
1803			buffer_info->skb = NULL;
1804			buffer_info->length = 0;
1805			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
1806			netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed");
1807			break;
1808		}
1809		buffer_info->dma = mapping;
1810		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
1811			ATL1C_PCIMAP_FROMDEVICE);
1812		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1813		rfd_next_to_use = next_next;
1814		if (++next_next == rfd_ring->count)
1815			next_next = 0;
1816		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1817		next_info = &rfd_ring->buffer_info[next_next];
1818		num_alloc++;
1819	}
1820
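	/*
	 * If any buffers were refilled, ensure the descriptor writes
	 * are visible to the device (wmb) before advancing the RFD
	 * producer index register.
	 */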
1821	if (num_alloc) {
1822		/* TODO: update mailbox here */
1823		wmb();
1824		rfd_ring->next_to_use = rfd_next_to_use;
1825		AT_WRITE_REG(&adapter->hw, atl1c_qregs[queue].rfd_prod,
1826			     rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1827	}
1828
1829	return num_alloc;
1830}
1831
1832static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring,
1833			struct	atl1c_recv_ret_status *rrs, u16 num)
1834{
1835	u16 i;
1836	/* the relationship between rrd and rfd is one-to-one */
1837	for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring,
1838					rrd_ring->next_to_clean)) {
1839		rrs->word3 &= ~RRS_RXD_UPDATED;
1840		if (++rrd_ring->next_to_clean == rrd_ring->count)
1841			rrd_ring->next_to_clean = 0;
1842	}
1843}
1844
1845static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1846	struct atl1c_recv_ret_status *rrs, u16 num)
1847{
1848	u16 i;
1849	u16 rfd_index;
1850	struct atl1c_buffer *buffer_info = rfd_ring->buffer_info;
1851
1852	rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1853			RRS_RX_RFD_INDEX_MASK;
1854	for (i = 0; i < num; i++) {
1855		buffer_info[rfd_index].skb = NULL;
1856		ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
1857					ATL1C_BUFFER_FREE);
1858		if (++rfd_index == rfd_ring->count)
1859			rfd_index = 0;
1860	}
1861	rfd_ring->next_to_clean = rfd_index;
1862}
1863
1864/**
1865 * atl1c_clean_rx - NAPI Rx polling callback
1866 * @napi: napi info
1867 * @budget: limit of packets to clean
1868 */
1869static int atl1c_clean_rx(struct napi_struct *napi, int budget)
1870{
1871	struct atl1c_rrd_ring *rrd_ring =
1872		container_of(napi, struct atl1c_rrd_ring, napi);
1873	struct atl1c_adapter *adapter = rrd_ring->adapter;
1874	u16 rfd_num, rfd_index;
1875	u16 length;
1876	struct pci_dev *pdev = adapter->pdev;
1877	struct net_device *netdev  = adapter->netdev;
1878	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[rrd_ring->num];
1879	struct sk_buff *skb;
1880	struct atl1c_recv_ret_status *rrs;
1881	struct atl1c_buffer *buffer_info;
1882	int work_done = 0;
1883	unsigned long flags;
1884
1885	/* Keep link state information with original netdev */
1886	if (!netif_carrier_ok(adapter->netdev))
1887		goto quit_polling;
1888
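	/* Reap completed RRD entries until the NAPI budget is
	 * exhausted or no valid descriptor is pending.
	 */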
1889	while (1) {
1890		if (work_done >= budget)
1891			break;
1892		rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
1893		if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
1894			rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
1895				RRS_RX_RFD_CNT_MASK;
1896			if (unlikely(rfd_num != 1))
1897				/* TODO: support multiple RFDs */
1898				if (netif_msg_rx_err(adapter))
1899					dev_warn(&pdev->dev,
1900						"Multiple RFDs not supported yet!\n");
1901			goto rrs_checked;
1902		} else {
1903			break;
1904		}
1905rrs_checked:
1906		atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
1907		if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
1908			atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1909			if (netif_msg_rx_err(adapter))
1910				dev_warn(&pdev->dev,
1911					 "wrong packet! rrs word3 is %x\n",
1912					 rrs->word3);
1913			continue;
1914		}
1915
1916		length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) &
1917				RRS_PKT_SIZE_MASK);
1918		/* Good Receive */
1919		if (likely(rfd_num == 1)) {
1920			rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1921					RRS_RX_RFD_INDEX_MASK;
1922			buffer_info = &rfd_ring->buffer_info[rfd_index];
1923			dma_unmap_single(&pdev->dev, buffer_info->dma,
1924					 buffer_info->length, DMA_FROM_DEVICE);
1925			skb = buffer_info->skb;
1926		} else {
1927			/* TODO: support multiple RFDs */
1928			if (netif_msg_rx_err(adapter))
1929				dev_warn(&pdev->dev,
1930					"Multiple RFDs not supported yet!\n");
1931			break;
1932		}
1933		atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1934		skb_put(skb, length - ETH_FCS_LEN);
1935		skb->protocol = eth_type_trans(skb, netdev);
1936		atl1c_rx_checksum(adapter, skb, rrs);
1937		if (rrs->word3 & RRS_VLAN_INS) {
1938			u16 vlan;
1939
1940			AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
1941			vlan = le16_to_cpu(vlan);
1942			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
1943		}
1944		napi_gro_receive(napi, skb);
1945
1946		work_done++;
1947	}
1948	if (work_done)
1949		atl1c_alloc_rx_buffer(adapter, rrd_ring->num, true);
1950
1951	if (work_done < budget) {
1952quit_polling:
1953		napi_complete_done(napi, work_done);
1954		spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
1955		adapter->hw.intr_mask |= atl1c_qregs[rrd_ring->num].rx_isr;
1956		AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
1957		spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
1958	}
1959	return work_done;
1960}
1961
1962#ifdef CONFIG_NET_POLL_CONTROLLER
1963
1964/*
1965 * Polling 'interrupt' - used by things like netconsole to send skbs
1966 * without having to re-enable interrupts. It's not called while
1967 * the interrupt routine is executing.
1968 */
1969static void atl1c_netpoll(struct net_device *netdev)
1970{
1971	struct atl1c_adapter *adapter = netdev_priv(netdev);
1972
1973	disable_irq(adapter->pdev->irq);
1974	atl1c_intr(adapter->pdev->irq, netdev);
1975	enable_irq(adapter->pdev->irq);
1976}
1977#endif
1978
1979static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, u32 queue)
1980{
1981	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
1982	u16 next_to_use = 0;
1983	u16 next_to_clean = 0;
1984
1985	next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1986	next_to_use   = tpd_ring->next_to_use;
1987
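	/* One slot is kept unused so that a full ring can be
	 * distinguished from an empty one, hence the "- 1".
	 */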
1988	return (u16)((next_to_clean > next_to_use) ?
1989		(next_to_clean - next_to_use - 1) :
1990		(tpd_ring->count + next_to_clean - next_to_use - 1));
1991}
1992
1993/*
1994 * get the next usable tpd
1995 * Note: the caller should use atl1c_tpd_avail to make sure
1996 * there are enough tpds to use
1997 */
1998static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
1999					    u32 queue)
2000{
2001	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
2002	struct atl1c_tpd_desc *tpd_desc;
2003	u16 next_to_use = 0;
2004
2005	next_to_use = tpd_ring->next_to_use;
2006	if (++tpd_ring->next_to_use == tpd_ring->count)
2007		tpd_ring->next_to_use = 0;
2008	tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use);
2009	memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc));
2010	return	tpd_desc;
2011}
2012
2013static struct atl1c_buffer *
2014atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd)
2015{
2016	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
2017
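	/* The buffer_info array parallels the descriptor array, so a
	 * descriptor's offset from the ring base is also the index of
	 * its buffer_info.
	 */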
2018	return &tpd_ring->buffer_info[tpd -
2019			(struct atl1c_tpd_desc *)tpd_ring->desc];
2020}
2021
2022/* Calculate the number of transmit packet descriptors (TPDs) needed */
2023static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
2024{
2025	u16 tpd_req;
2026	u16 proto_hdr_len = 0;
2027
2028	tpd_req = skb_shinfo(skb)->nr_frags + 1;
2029
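	/* For TSO, the protocol headers may take a TPD of their own,
	 * and IPv6 LSO consumes one extra (extended) TPD as well.
	 */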
2030	if (skb_is_gso(skb)) {
2031		proto_hdr_len = skb_tcp_all_headers(skb);
2032		if (proto_hdr_len < skb_headlen(skb))
2033			tpd_req++;
2034		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
2035			tpd_req++;
2036	}
2037	return tpd_req;
2038}
2039
2040static int atl1c_tso_csum(struct atl1c_adapter *adapter,
2041			  struct sk_buff *skb,
2042			  struct atl1c_tpd_desc **tpd,
2043			  u32 queue)
2044{
2045	struct pci_dev *pdev = adapter->pdev;
2046	unsigned short offload_type;
2047	u8 hdr_len;
2048	u32 real_len;
2049
2050	if (skb_is_gso(skb)) {
2051		int err;
2052
2053		err = skb_cow_head(skb, 0);
2054		if (err < 0)
2055			return err;
2056
2057		offload_type = skb_shinfo(skb)->gso_type;
2058
2059		if (offload_type & SKB_GSO_TCPV4) {
2060			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
2061					+ ntohs(ip_hdr(skb)->tot_len));
2062
2063			if (real_len < skb->len) {
2064				err = pskb_trim(skb, real_len);
2065				if (err)
2066					return err;
2067			}
2068
2069			hdr_len = skb_tcp_all_headers(skb);
2070			if (unlikely(skb->len == hdr_len)) {
2071				/* only checksum is needed */
2072				if (netif_msg_tx_queued(adapter))
2073					dev_warn(&pdev->dev,
2074						"IPV4 tso with zero data??\n");
2075				goto check_sum;
2076			} else {
2077				ip_hdr(skb)->check = 0;
2078				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
2079							ip_hdr(skb)->saddr,
2080							ip_hdr(skb)->daddr,
2081							0, IPPROTO_TCP, 0);
2082				(*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
2083			}
2084		}
2085
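		/* IPv6 LSO uses an extended TPD: the descriptor fetched
		 * so far becomes the extended TPD and a fresh one is
		 * fetched for the packet itself.
		 */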
2086		if (offload_type & SKB_GSO_TCPV6) {
2087			struct atl1c_tpd_ext_desc *etpd =
2088				*(struct atl1c_tpd_ext_desc **)(tpd);
2089
2090			memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
2091			*tpd = atl1c_get_tpd(adapter, queue);
2092			ipv6_hdr(skb)->payload_len = 0;
2093			/* is the payload 0 bytes? */
2094			hdr_len = skb_tcp_all_headers(skb);
2095			if (unlikely(skb->len == hdr_len)) {
2096				/* only checksum is needed */
2097				if (netif_msg_tx_queued(adapter))
2098					dev_warn(&pdev->dev,
2099						"IPV6 tso with zero data??\n");
2100				goto check_sum;
2101			} else
2102				tcp_v6_gso_csum_prep(skb);
2103
2104			etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
2105			etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
2106			etpd->pkt_len = cpu_to_le32(skb->len);
2107			(*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
2108		}
2109
2110		(*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
2111		(*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
2112				TPD_TCPHDR_OFFSET_SHIFT;
2113		(*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
2114				TPD_MSS_SHIFT;
2115		return 0;
2116	}
2117
2118check_sum:
2119	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2120		u8 css, cso;
2121		cso = skb_checksum_start_offset(skb);
2122
2123		if (unlikely(cso & 0x1)) {
2124			if (netif_msg_tx_err(adapter))
2125				dev_err(&adapter->pdev->dev,
2126					"payload offset should be an even number\n");
2127			return -1;
2128		} else {
2129			css = cso + skb->csum_offset;
2130
2131			(*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
2132					TPD_PLOADOFFSET_SHIFT;
2133			(*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
2134					TPD_CCSUM_OFFSET_SHIFT;
2135			(*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
2136		}
2137	}
2138	return 0;
2139}
2140
2141static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
2142			      struct atl1c_tpd_desc *first_tpd,
2143			      u32 queue)
2144{
2145	struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[queue];
2146	struct atl1c_buffer *buffer_info;
2147	struct atl1c_tpd_desc *tpd;
2148	u16 first_index, index;
2149
2150	first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
2151	index = first_index;
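	/* Walk from the failed packet's first descriptor up to
	 * next_to_use, unmapping buffers and clearing each TPD.
	 */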
2152	while (index != tpd_ring->next_to_use) {
2153		tpd = ATL1C_TPD_DESC(tpd_ring, index);
2154		buffer_info = &tpd_ring->buffer_info[index];
2155		atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
2156		memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
2157		if (++index == tpd_ring->count)
2158			index = 0;
2159	}
2160	tpd_ring->next_to_use = first_index;
2161}
2162
2163static int atl1c_tx_map(struct atl1c_adapter *adapter,
2164			struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
2165			u32 queue)
2166{
2167	struct atl1c_tpd_desc *use_tpd = NULL;
2168	struct atl1c_buffer *buffer_info = NULL;
2169	u16 buf_len = skb_headlen(skb);
2170	u16 map_len = 0;
2171	u16 mapped_len = 0;
2172	u16 hdr_len = 0;
2173	u16 nr_frags;
2174	u16 f;
2175	int tso;
2176
2177	nr_frags = skb_shinfo(skb)->nr_frags;
2178	tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
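	/* With LSO enabled, the protocol headers are mapped into a
	 * TPD of their own before the remaining linear data and the
	 * page fragments.
	 */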
2179	if (tso) {
2180		/* TSO */
2181		hdr_len = skb_tcp_all_headers(skb);
2182		map_len = hdr_len;
2183		use_tpd = tpd;
2184
2185		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2186		buffer_info->length = map_len;
2187		buffer_info->dma = dma_map_single(&adapter->pdev->dev,
2188						  skb->data, hdr_len,
2189						  DMA_TO_DEVICE);
2190		if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
2191			goto err_dma;
2192		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2193		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2194			ATL1C_PCIMAP_TODEVICE);
2195		mapped_len += map_len;
2196		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2197		use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2198	}
2199
2200	if (mapped_len < buf_len) {
2201		/* mapped_len == 0 means we should use the first tpd,
2202		   which is provided by the caller */
2203		if (mapped_len == 0)
2204			use_tpd = tpd;
2205		else {
2206			use_tpd = atl1c_get_tpd(adapter, queue);
2207			memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2208		}
2209		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2210		buffer_info->length = buf_len - mapped_len;
2211		buffer_info->dma =
2212			dma_map_single(&adapter->pdev->dev,
2213				       skb->data + mapped_len,
2214				       buffer_info->length, DMA_TO_DEVICE);
2215		if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
2216			goto err_dma;
2217
2218		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2219		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2220			ATL1C_PCIMAP_TODEVICE);
2221		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2222		use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
2223	}
2224
2225	for (f = 0; f < nr_frags; f++) {
2226		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2227
2228		use_tpd = atl1c_get_tpd(adapter, queue);
2229		memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2230
2231		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2232		buffer_info->length = skb_frag_size(frag);
2233		buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
2234						    frag, 0,
2235						    buffer_info->length,
2236						    DMA_TO_DEVICE);
2237		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
2238			goto err_dma;
2239
2240		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2241		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
2242			ATL1C_PCIMAP_TODEVICE);
2243		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2244		use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
2245	}
2246
2247	/* The last tpd */
2248	use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
2249	/* The last buffer_info contains the skb address,
2250	   so the skb is freed after unmapping */
2251	buffer_info->skb = skb;
2252
2253	return 0;
2254
2255err_dma:
2256	buffer_info->dma = 0;
2257	buffer_info->length = 0;
2258	return -1;
2259}
2260
2261static void atl1c_tx_queue(struct atl1c_adapter *adapter, u32 queue)
2262{
2263	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
2264
2265	AT_WRITE_REGW(&adapter->hw, atl1c_qregs[queue].tpd_prod,
2266		      tpd_ring->next_to_use);
2267}
2268
2269static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2270					  struct net_device *netdev)
2271{
2272	struct atl1c_adapter *adapter = netdev_priv(netdev);
2273	u32 queue = skb_get_queue_mapping(skb);
2274	struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue);
2275	struct atl1c_tpd_desc *tpd;
2276	u16 tpd_req;
2277
2278	if (test_bit(__AT_DOWN, &adapter->flags)) {
2279		dev_kfree_skb_any(skb);
2280		return NETDEV_TX_OK;
2281	}
2282
2283	tpd_req = atl1c_cal_tpd_req(skb);
2284
2285	if (atl1c_tpd_avail(adapter, queue) < tpd_req) {
2286		/* not enough descriptors, just stop the queue */
2287		atl1c_tx_queue(adapter, queue);
2288		netif_tx_stop_queue(txq);
2289		return NETDEV_TX_BUSY;
2290	}
2291
2292	tpd = atl1c_get_tpd(adapter, queue);
2293
2294	/* do TSO and check sum */
2295	if (atl1c_tso_csum(adapter, skb, &tpd, queue) != 0) {
2296		atl1c_tx_queue(adapter, queue);
2297		dev_kfree_skb_any(skb);
2298		return NETDEV_TX_OK;
2299	}
2300
2301	if (unlikely(skb_vlan_tag_present(skb))) {
2302		u16 vlan = skb_vlan_tag_get(skb);
2303		__le16 tag;
2304
2305		vlan = cpu_to_le16(vlan);
2306		AT_VLAN_TO_TAG(vlan, tag);
2307		tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
2308		tpd->vlan_tag = tag;
2309	}
2310
2311	if (skb_network_offset(skb) != ETH_HLEN)
2312		tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
2313
2314	if (atl1c_tx_map(adapter, skb, tpd, queue) < 0) {
2315		netif_info(adapter, tx_done, adapter->netdev,
2316			   "tx-skb dropped due to dma error\n");
2317		/* roll back tpd/buffer */
2318		atl1c_tx_rollback(adapter, tpd, queue);
2319		dev_kfree_skb_any(skb);
2320	} else {
2321		bool more = netdev_xmit_more();
2322
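		/* Ring the TX doorbell only once the stack stops
		 * batching packets (BQL accounting via
		 * __netdev_tx_sent_queue).
		 */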
2323		if (__netdev_tx_sent_queue(txq, skb->len, more))
2324			atl1c_tx_queue(adapter, queue);
2325	}
2326
2327	return NETDEV_TX_OK;
2328}
2329
2330static void atl1c_free_irq(struct atl1c_adapter *adapter)
2331{
2332	struct net_device *netdev = adapter->netdev;
2333
2334	free_irq(adapter->pdev->irq, netdev);
2335
2336	if (adapter->have_msi)
2337		pci_disable_msi(adapter->pdev);
2338}
2339
2340static int atl1c_request_irq(struct atl1c_adapter *adapter)
2341{
2342	struct pci_dev    *pdev   = adapter->pdev;
2343	struct net_device *netdev = adapter->netdev;
2344	int flags = 0;
2345	int err = 0;
2346
2347	adapter->have_msi = true;
2348	err = pci_enable_msi(adapter->pdev);
2349	if (err) {
2350		if (netif_msg_ifup(adapter))
2351			dev_err(&pdev->dev,
2352				"Unable to allocate MSI interrupt Error: %d\n",
2353				err);
2354		adapter->have_msi = false;
2355	}
2356
2357	if (!adapter->have_msi)
2358		flags |= IRQF_SHARED;
2359	err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
2360			netdev->name, netdev);
2361	if (err) {
2362		if (netif_msg_ifup(adapter))
2363			dev_err(&pdev->dev,
2364				"Unable to allocate interrupt Error: %d\n",
2365				err);
2366		if (adapter->have_msi)
2367			pci_disable_msi(adapter->pdev);
2368		return err;
2369	}
2370	if (netif_msg_ifup(adapter))
2371		dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
2372	return err;
2373}
2374
2375
2376static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
2377{
2378	int i;
2379	/* release tx-pending skbs and reset tx/rx ring index */
2380	for (i = 0; i < adapter->tx_queue_count; ++i)
2381		atl1c_clean_tx_ring(adapter, i);
2382	for (i = 0; i < adapter->rx_queue_count; ++i)
2383		atl1c_clean_rx_ring(adapter, i);
2384}
2385
2386static int atl1c_up(struct atl1c_adapter *adapter)
2387{
2388	struct net_device *netdev = adapter->netdev;
2389	int err;
2390	int i;
2391
2392	netif_carrier_off(netdev);
2393
2394	err = atl1c_configure(adapter);
2395	if (unlikely(err))
2396		goto err_up;
2397
2398	err = atl1c_request_irq(adapter);
2399	if (unlikely(err))
2400		goto err_up;
2401
2402	atl1c_check_link_status(adapter);
2403	clear_bit(__AT_DOWN, &adapter->flags);
2404	for (i = 0; i < adapter->tx_queue_count; ++i)
2405		napi_enable(&adapter->tpd_ring[i].napi);
2406	for (i = 0; i < adapter->rx_queue_count; ++i)
2407		napi_enable(&adapter->rrd_ring[i].napi);
2408	atl1c_irq_enable(adapter);
2409	netif_start_queue(netdev);
2410	return err;
2411
2412err_up:
2413	for (i = 0; i < adapter->rx_queue_count; ++i)
2414		atl1c_clean_rx_ring(adapter, i);
2415	return err;
2416}
2417
2418static void atl1c_down(struct atl1c_adapter *adapter)
2419{
2420	struct net_device *netdev = adapter->netdev;
2421	int i;
2422
2423	atl1c_del_timer(adapter);
2424	adapter->work_event = 0; /* clear all event */
2425	/* signal that we're down so the interrupt handler does not
2426	 * reschedule our watchdog timer */
2427	set_bit(__AT_DOWN, &adapter->flags);
2428	netif_carrier_off(netdev);
2429	for (i = 0; i < adapter->tx_queue_count; ++i)
2430		napi_disable(&adapter->tpd_ring[i].napi);
2431	for (i = 0; i < adapter->rx_queue_count; ++i)
2432		napi_disable(&adapter->rrd_ring[i].napi);
2433	atl1c_irq_disable(adapter);
2434	atl1c_free_irq(adapter);
2435	/* disable ASPM if device inactive */
2436	atl1c_disable_l0s_l1(&adapter->hw);
2437	/* reset MAC to disable all RX/TX */
2438	atl1c_reset_mac(&adapter->hw);
2439	msleep(1);
2440
2441	adapter->link_speed = SPEED_0;
2442	adapter->link_duplex = -1;
2443	atl1c_reset_dma_ring(adapter);
2444}
2445
2446/**
2447 * atl1c_open - Called when a network interface is made active
2448 * @netdev: network interface device structure
2449 *
2450 * Returns 0 on success, negative value on failure
2451 *
2452 * The open entry point is called when a network interface is made
2453 * active by the system (IFF_UP).  At this point all resources needed
2454 * for transmit and receive operations are allocated, the interrupt
2455 * handler is registered with the OS, the watchdog timer is started,
2456 * and the stack is notified that the interface is ready.
2457 */
2458static int atl1c_open(struct net_device *netdev)
2459{
2460	struct atl1c_adapter *adapter = netdev_priv(netdev);
2461	int err;
2462
2463	/* disallow open during test */
2464	if (test_bit(__AT_TESTING, &adapter->flags))
2465		return -EBUSY;
2466
2467	/* allocate rx/tx dma buffer & descriptors */
2468	err = atl1c_setup_ring_resources(adapter);
2469	if (unlikely(err))
2470		return err;
2471
2472	err = atl1c_up(adapter);
2473	if (unlikely(err))
2474		goto err_up;
2475
2476	return 0;
2477
2478err_up:
2479	atl1c_free_irq(adapter);
2480	atl1c_free_ring_resources(adapter);
2481	atl1c_reset_mac(&adapter->hw);
2482	return err;
2483}
2484
2485/**
2486 * atl1c_close - Disables a network interface
2487 * @netdev: network interface device structure
2488 *
2489 * Returns 0, this is not allowed to fail
2490 *
2491 * The close entry point is called when an interface is de-activated
2492 * by the OS.  The hardware is still under the drivers control, but
2493 * needs to be disabled.  A global MAC reset is issued to stop the
2494 * hardware, and all transmit and receive resources are freed.
2495 */
2496static int atl1c_close(struct net_device *netdev)
2497{
2498	struct atl1c_adapter *adapter = netdev_priv(netdev);
2499
2500	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2501	set_bit(__AT_DOWN, &adapter->flags);
2502	cancel_work_sync(&adapter->common_task);
2503	atl1c_down(adapter);
2504	atl1c_free_ring_resources(adapter);
2505	return 0;
2506}
2507
2508static int atl1c_suspend(struct device *dev)
2509{
2510	struct net_device *netdev = dev_get_drvdata(dev);
2511	struct atl1c_adapter *adapter = netdev_priv(netdev);
2512	struct atl1c_hw *hw = &adapter->hw;
2513	u32 wufc = adapter->wol;
2514
2515	atl1c_disable_l0s_l1(hw);
2516	if (netif_running(netdev)) {
2517		WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2518		atl1c_down(adapter);
2519	}
2520	netif_device_detach(netdev);
2521
2522	if (wufc)
2523		if (atl1c_phy_to_ps_link(hw) != 0)
2524			dev_dbg(dev, "phy power saving failed\n");
2525
2526	atl1c_power_saving(hw, wufc);
2527
2528	return 0;
2529}
2530
2531#ifdef CONFIG_PM_SLEEP
2532static int atl1c_resume(struct device *dev)
2533{
2534	struct net_device *netdev = dev_get_drvdata(dev);
2535	struct atl1c_adapter *adapter = netdev_priv(netdev);
2536
2537	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2538	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2539
2540	atl1c_phy_reset(&adapter->hw);
2541	atl1c_reset_mac(&adapter->hw);
2542	atl1c_phy_init(&adapter->hw);
2543
2544	netif_device_attach(netdev);
2545	if (netif_running(netdev))
2546		atl1c_up(adapter);
2547
2548	return 0;
2549}
2550#endif
2551
2552static void atl1c_shutdown(struct pci_dev *pdev)
2553{
2554	struct net_device *netdev = pci_get_drvdata(pdev);
2555	struct atl1c_adapter *adapter = netdev_priv(netdev);
2556
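	/* Enter the wake-capable low-power state, honouring the
	 * configured WoL settings.
	 */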
2557	atl1c_suspend(&pdev->dev);
2558	pci_wake_from_d3(pdev, adapter->wol);
2559	pci_set_power_state(pdev, PCI_D3hot);
2560}
2561
2562static const struct net_device_ops atl1c_netdev_ops = {
2563	.ndo_open		= atl1c_open,
2564	.ndo_stop		= atl1c_close,
2565	.ndo_validate_addr	= eth_validate_addr,
2566	.ndo_start_xmit		= atl1c_xmit_frame,
2567	.ndo_set_mac_address	= atl1c_set_mac_addr,
2568	.ndo_set_rx_mode	= atl1c_set_multi,
2569	.ndo_change_mtu		= atl1c_change_mtu,
2570	.ndo_fix_features	= atl1c_fix_features,
2571	.ndo_set_features	= atl1c_set_features,
2572	.ndo_eth_ioctl		= atl1c_ioctl,
2573	.ndo_tx_timeout		= atl1c_tx_timeout,
2574	.ndo_get_stats		= atl1c_get_stats,
2575#ifdef CONFIG_NET_POLL_CONTROLLER
2576	.ndo_poll_controller	= atl1c_netpoll,
2577#endif
2578};
2579
2580static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2581{
2582	SET_NETDEV_DEV(netdev, &pdev->dev);
2583	pci_set_drvdata(pdev, netdev);
2584
2585	netdev->netdev_ops = &atl1c_netdev_ops;
2586	netdev->watchdog_timeo = AT_TX_WATCHDOG;
2587	netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
2588	atl1c_set_ethtool_ops(netdev);
2589
2590	/* TODO: add when ready */
2591	netdev->hw_features =	NETIF_F_SG		|
2592				NETIF_F_HW_CSUM		|
2593				NETIF_F_HW_VLAN_CTAG_RX	|
2594				NETIF_F_TSO		|
2595				NETIF_F_TSO6;
2596	netdev->features =	netdev->hw_features	|
2597				NETIF_F_HW_VLAN_CTAG_TX;
2598	return 0;
2599}
2600
2601/**
2602 * atl1c_probe - Device Initialization Routine
2603 * @pdev: PCI device information struct
2604 * @ent: entry in atl1c_pci_tbl
2605 *
2606 * Returns 0 on success, negative on failure
2607 *
2608 * atl1c_probe initializes an adapter identified by a pci_dev structure.
2609 * The OS initialization, configuring of the adapter private structure,
2610 * and a hardware reset occur.
2611 */
2612static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2613{
2614	struct net_device *netdev;
2615	struct atl1c_adapter *adapter;
2616	static int cards_found;
2617	u8 __iomem *hw_addr;
2618	enum atl1c_nic_type nic_type;
2619	u32 queue_count = 1;
2620	int err = 0;
2621	int i;
2622
2623	/* enable device (incl. PCI PM wakeup and hotplug setup) */
2624	err = pci_enable_device_mem(pdev);
2625	if (err)
2626		return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
2627
2628	/*
2629	 * The atl1c chip can DMA to 64-bit addresses, but it uses a single
2630	 * shared register for the high 32 bits, so only a single, aligned,
2631	 * 4 GB physical address range can be used at a time.
2632	 *
2633	 * Supporting 64-bit DMA on this hardware is more trouble than it's
2634	 * worth.  It is far easier to limit to 32-bit DMA than update
2635	 * various kernel subsystems to support the mechanics required by a
2636	 * fixed-high-32-bit system.
2637	 */
2638	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2639	if (err) {
2640		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
2641		goto err_dma;
2642	}
2643
2644	err = pci_request_regions(pdev, atl1c_driver_name);
2645	if (err) {
2646		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2647		goto err_pci_reg;
2648	}
2649
2650	pci_set_master(pdev);
2651
2652	hw_addr = pci_ioremap_bar(pdev, 0);
2653	if (!hw_addr) {
2654		err = -EIO;
2655		dev_err(&pdev->dev, "cannot map device registers\n");
2656		goto err_ioremap;
2657	}
2658
2659	nic_type = atl1c_get_mac_type(pdev, hw_addr);
2660	if (nic_type == athr_mt)
2661		queue_count = 4;
2662
2663	netdev = alloc_etherdev_mq(sizeof(struct atl1c_adapter), queue_count);
2664	if (netdev == NULL) {
2665		err = -ENOMEM;
2666		goto err_alloc_etherdev;
2667	}
2668
2669	err = atl1c_init_netdev(netdev, pdev);
2670	if (err) {
2671		dev_err(&pdev->dev, "init netdevice failed\n");
2672		goto err_init_netdev;
2673	}
2674	adapter = netdev_priv(netdev);
2675	adapter->bd_number = cards_found;
2676	adapter->netdev = netdev;
2677	adapter->pdev = pdev;
2678	adapter->hw.adapter = adapter;
2679	adapter->hw.nic_type = nic_type;
2680	adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
2681	adapter->hw.hw_addr = hw_addr;
2682	adapter->tx_queue_count = queue_count;
2683	adapter->rx_queue_count = queue_count;
2684
2685	/* init mii data */
2686	adapter->mii.dev = netdev;
2687	adapter->mii.mdio_read  = atl1c_mdio_read;
2688	adapter->mii.mdio_write = atl1c_mdio_write;
2689	adapter->mii.phy_id_mask = 0x1f;
2690	adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
2691	dev_set_threaded(netdev, true);
2692	for (i = 0; i < adapter->rx_queue_count; ++i)
2693		netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
2694			       atl1c_clean_rx);
2695	for (i = 0; i < adapter->tx_queue_count; ++i)
2696		netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
2697				  atl1c_clean_tx);
2698	timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
2699	/* setup the private structure */
2700	err = atl1c_sw_init(adapter);
2701	if (err) {
2702		dev_err(&pdev->dev, "net device private data init failed\n");
2703		goto err_sw_init;
2704	}
2705	/* set max MTU */
2706	atl1c_set_max_mtu(netdev);
2707
2708	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2709
2710	/* Init GPHY as early as possible due to power saving issue  */
2711	atl1c_phy_reset(&adapter->hw);
2712
2713	err = atl1c_reset_mac(&adapter->hw);
2714	if (err) {
2715		err = -EIO;
2716		goto err_reset;
2717	}
2718
2719	/* reset the controller to
2720	 * put the device in a known good starting state */
2721	err = atl1c_phy_init(&adapter->hw);
2722	if (err) {
2723		err = -EIO;
2724		goto err_reset;
2725	}
2726	if (atl1c_read_mac_addr(&adapter->hw)) {
2727		/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
2728		netdev->addr_assign_type = NET_ADDR_RANDOM;
2729	}
2730	eth_hw_addr_set(netdev, adapter->hw.mac_addr);
2731	if (netif_msg_probe(adapter))
2732		dev_dbg(&pdev->dev, "mac address : %pM\n",
2733			adapter->hw.mac_addr);
2734
2735	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
2736	INIT_WORK(&adapter->common_task, atl1c_common_task);
2737	adapter->work_event = 0;
2738	err = register_netdev(netdev);
2739	if (err) {
2740		dev_err(&pdev->dev, "register netdevice failed\n");
2741		goto err_register;
2742	}
2743
2744	cards_found++;
2745	return 0;
2746
2747err_reset:
2748err_register:
2749err_sw_init:
2750err_init_netdev:
2751	free_netdev(netdev);
2752err_alloc_etherdev:
2753	iounmap(hw_addr);
2754err_ioremap:
2755	pci_release_regions(pdev);
2756err_pci_reg:
2757err_dma:
2758	pci_disable_device(pdev);
2759	return err;
2760}
2761
2762/**
2763 * atl1c_remove - Device Removal Routine
2764 * @pdev: PCI device information struct
2765 *
2766 * atl1c_remove is called by the PCI subsystem to alert the driver
2767 * that it should release a PCI device.  This could be caused by a
2768 * Hot-Plug event, or because the driver is going to be removed from
2769 * memory.
2770 */
2771static void atl1c_remove(struct pci_dev *pdev)
2772{
2773	struct net_device *netdev = pci_get_drvdata(pdev);
2774	struct atl1c_adapter *adapter = netdev_priv(netdev);
2775
2776	unregister_netdev(netdev);
2777	/* restore permanent address */
2778	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr);
2779	atl1c_phy_disable(&adapter->hw);
2780
2781	iounmap(adapter->hw.hw_addr);
2782
2783	pci_release_regions(pdev);
2784	pci_disable_device(pdev);
2785	free_netdev(netdev);
2786}
2787
2788/**
2789 * atl1c_io_error_detected - called when PCI error is detected
2790 * @pdev: Pointer to PCI device
2791 * @state: The current pci connection state
2792 *
2793 * This function is called after a PCI bus error affecting
2794 * this device has been detected.
2795 */
2796static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
2797						pci_channel_state_t state)
2798{
2799	struct net_device *netdev = pci_get_drvdata(pdev);
2800	struct atl1c_adapter *adapter = netdev_priv(netdev);
2801
2802	netif_device_detach(netdev);
2803
2804	if (state == pci_channel_io_perm_failure)
2805		return PCI_ERS_RESULT_DISCONNECT;
2806
2807	if (netif_running(netdev))
2808		atl1c_down(adapter);
2809
2810	pci_disable_device(pdev);
2811
2812	/* Request a slot reset. */
2813	return PCI_ERS_RESULT_NEED_RESET;
2814}
2815
2816/**
2817 * atl1c_io_slot_reset - called after the pci bus has been reset.
2818 * @pdev: Pointer to PCI device
2819 *
2820 * Restart the card from scratch, as if from a cold-boot. Implementation
2821 * resembles the first-half of the e1000_resume routine.
2822 */
2823static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
2824{
2825	struct net_device *netdev = pci_get_drvdata(pdev);
2826	struct atl1c_adapter *adapter = netdev_priv(netdev);
2827
2828	if (pci_enable_device(pdev)) {
2829		if (netif_msg_hw(adapter))
2830			dev_err(&pdev->dev,
2831				"Cannot re-enable PCI device after reset\n");
2832		return PCI_ERS_RESULT_DISCONNECT;
2833	}
2834	pci_set_master(pdev);
2835
2836	pci_enable_wake(pdev, PCI_D3hot, 0);
2837	pci_enable_wake(pdev, PCI_D3cold, 0);
2838
2839	atl1c_reset_mac(&adapter->hw);
2840
2841	return PCI_ERS_RESULT_RECOVERED;
2842}
2843
2844/**
2845 * atl1c_io_resume - called when traffic can start flowing again.
2846 * @pdev: Pointer to PCI device
2847 *
2848 * This callback is called when the error recovery driver tells us that
2849 * it's OK to resume normal operation. Implementation resembles the
2850 * second-half of the atl1c_resume routine.
2851 */
2852static void atl1c_io_resume(struct pci_dev *pdev)
2853{
2854	struct net_device *netdev = pci_get_drvdata(pdev);
2855	struct atl1c_adapter *adapter = netdev_priv(netdev);
2856
2857	if (netif_running(netdev)) {
2858		if (atl1c_up(adapter)) {
2859			if (netif_msg_hw(adapter))
2860				dev_err(&pdev->dev,
2861					"Cannot bring device back up after reset\n");
2862			return;
2863		}
2864	}
2865
2866	netif_device_attach(netdev);
2867}
2868
2869static const struct pci_error_handlers atl1c_err_handler = {
2870	.error_detected = atl1c_io_error_detected,
2871	.slot_reset = atl1c_io_slot_reset,
2872	.resume = atl1c_io_resume,
2873};
2874
2875static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
2876
2877static struct pci_driver atl1c_driver = {
2878	.name     = atl1c_driver_name,
2879	.id_table = atl1c_pci_tbl,
2880	.probe    = atl1c_probe,
2881	.remove   = atl1c_remove,
2882	.shutdown = atl1c_shutdown,
2883	.err_handler = &atl1c_err_handler,
2884	.driver.pm = &atl1c_pm_ops,
2885};
2886
2887module_pci_driver(atl1c_driver);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
   4 *
   5 * Derived from Intel e1000 driver
   6 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   7 */
   8
   9#include "atl1c.h"
  10
  11char atl1c_driver_name[] = "atl1c";
  12
  13/*
  14 * atl1c_pci_tbl - PCI Device ID Table
  15 *
  16 * Wildcard entries (PCI_ANY_ID) should come last
  17 * Last entry must be all 0s
  18 *
  19 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  20 *   Class, Class Mask, private data (not used) }
  21 */
  22static const struct pci_device_id atl1c_pci_tbl[] = {
  23	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
  24	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
  25	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
  26	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
  27	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
  28	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
  29	/* required last entry */
  30	{ 0 }
  31};
  32MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
  33
  34MODULE_AUTHOR("Jie Yang");
  35MODULE_AUTHOR("Qualcomm Atheros Inc.");
  36MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
  37MODULE_LICENSE("GPL");
  38
  39struct atl1c_qregs {
  40	u16 tpd_addr_lo;
  41	u16 tpd_prod;
  42	u16 tpd_cons;
  43	u16 rfd_addr_lo;
  44	u16 rrd_addr_lo;
  45	u16 rfd_prod;
  46	u32 tx_isr;
  47	u32 rx_isr;
  48};
  49
  50static struct atl1c_qregs atl1c_qregs[AT_MAX_TRANSMIT_QUEUE] = {
  51	{
  52		REG_TPD_PRI0_ADDR_LO, REG_TPD_PRI0_PIDX, REG_TPD_PRI0_CIDX,
  53		REG_RFD0_HEAD_ADDR_LO, REG_RRD0_HEAD_ADDR_LO,
  54		REG_MB_RFD0_PROD_IDX, ISR_TX_PKT_0, ISR_RX_PKT_0
  55	},
  56	{
  57		REG_TPD_PRI1_ADDR_LO, REG_TPD_PRI1_PIDX, REG_TPD_PRI1_CIDX,
  58		REG_RFD1_HEAD_ADDR_LO, REG_RRD1_HEAD_ADDR_LO,
  59		REG_MB_RFD1_PROD_IDX, ISR_TX_PKT_1, ISR_RX_PKT_1
  60	},
  61	{
  62		REG_TPD_PRI2_ADDR_LO, REG_TPD_PRI2_PIDX, REG_TPD_PRI2_CIDX,
  63		REG_RFD2_HEAD_ADDR_LO, REG_RRD2_HEAD_ADDR_LO,
  64		REG_MB_RFD2_PROD_IDX, ISR_TX_PKT_2, ISR_RX_PKT_2
  65	},
  66	{
  67		REG_TPD_PRI3_ADDR_LO, REG_TPD_PRI3_PIDX, REG_TPD_PRI3_CIDX,
  68		REG_RFD3_HEAD_ADDR_LO, REG_RRD3_HEAD_ADDR_LO,
  69		REG_MB_RFD3_PROD_IDX, ISR_TX_PKT_3, ISR_RX_PKT_3
  70	},
  71};
  72
  73static int atl1c_stop_mac(struct atl1c_hw *hw);
  74static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
  75static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
  76static void atl1c_start_mac(struct atl1c_adapter *adapter);
  77static int atl1c_up(struct atl1c_adapter *adapter);
  78static void atl1c_down(struct atl1c_adapter *adapter);
  79static int atl1c_reset_mac(struct atl1c_hw *hw);
  80static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
  81static int atl1c_configure(struct atl1c_adapter *adapter);
  82static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
  83				 bool napi_mode);
  84
  85
  86static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  87	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
  88static void atl1c_pcie_patch(struct atl1c_hw *hw)
  89{
  90	u32 mst_data, data;
  91
  92	/* pclk sel could switch to 25M */
  93	AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
  94	mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
  95	AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
  96
  97	/* WoL/PCIE related settings */
  98	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
  99		AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
 100		data |= PCIE_PHYMISC_FORCE_RCV_DET;
 101		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
 102	} else { /* new dev set bit5 of MASTER */
 103		if (!(mst_data & MASTER_CTRL_WAKEN_25M))
 104			AT_WRITE_REG(hw, REG_MASTER_CTRL,
 105				mst_data | MASTER_CTRL_WAKEN_25M);
 106	}
 107	/* aspm/PCIE setting only for l2cb 1.0 */
 108	if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
 109		AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
 110		data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
 111			L2CB1_PCIE_PHYMISC2_CDR_BW);
 112		data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
 113			L2CB1_PCIE_PHYMISC2_L0S_TH);
 114		AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
 115		/* extend L1 sync timer */
 116		AT_READ_REG(hw, REG_LINK_CTRL, &data);
 117		data |= LINK_CTRL_EXT_SYNC;
 118		AT_WRITE_REG(hw, REG_LINK_CTRL, data);
 119	}
 120	/* l2cb 1.x & l1d 1.x */
 121	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
 122		AT_READ_REG(hw, REG_PM_CTRL, &data);
 123		data |= PM_CTRL_L0S_BUFSRX_EN;
 124		AT_WRITE_REG(hw, REG_PM_CTRL, data);
 125		/* clear vendor msg */
 126		AT_READ_REG(hw, REG_DMA_DBG, &data);
 127		AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
 128	}
 129}
 130
 131/* FIXME: no need any more ? */
 132/*
 133 * atl1c_init_pcie - init PCIE module
 134 */
 135static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
 136{
 137	u32 data;
 138	u32 pci_cmd;
 139	struct pci_dev *pdev = hw->adapter->pdev;
 140	int pos;
 141
 142	AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
 143	pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
 144	pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
 145		PCI_COMMAND_IO);
 146	AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
 147
 148	/*
 149	 * Clear any PowerSaveing Settings
 150	 */
 151	pci_enable_wake(pdev, PCI_D3hot, 0);
 152	pci_enable_wake(pdev, PCI_D3cold, 0);
 153	/* wol sts read-clear */
 154	AT_READ_REG(hw, REG_WOL_CTRL, &data);
 155	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
 156
 157	/*
 158	 * Mask some pcie error bits
 159	 */
 160	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 161	if (pos) {
 162		pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
 163		data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
 164		pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
 165	}
 166	/* clear error status */
 167	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
 168			PCI_EXP_DEVSTA_NFED |
 169			PCI_EXP_DEVSTA_FED |
 170			PCI_EXP_DEVSTA_CED |
 171			PCI_EXP_DEVSTA_URD);
 172
 173	AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
 174	data &= ~LTSSM_ID_EN_WRO;
 175	AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
 176
 177	atl1c_pcie_patch(hw);
 178	if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
 179		atl1c_disable_l0s_l1(hw);
 180
 181	msleep(5);
 182}
 183
 184/**
 185 * atl1c_irq_enable - Enable default interrupt generation settings
 186 * @adapter: board private structure
 187 */
 188static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
 189{
 190	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
 191		AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
 192		AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
 193		AT_WRITE_FLUSH(&adapter->hw);
 194	}
 195}
 196
 197/**
 198 * atl1c_irq_disable - Mask off interrupt generation on the NIC
 199 * @adapter: board private structure
 200 */
 201static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
 202{
 203	atomic_inc(&adapter->irq_sem);
 204	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
 205	AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
 206	AT_WRITE_FLUSH(&adapter->hw);
 207	synchronize_irq(adapter->pdev->irq);
 208}
 209
 210/**
 211 * atl1c_irq_reset - reset interrupt confiure on the NIC
 212 * @adapter: board private structure
 213 */
 214static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
 215{
 216	atomic_set(&adapter->irq_sem, 1);
 217	atl1c_irq_enable(adapter);
 218}
 219
 220/*
 221 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
 222 * of the idle status register until the device is actually idle
 223 */
 224static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
 225{
 226	int timeout;
 227	u32 data;
 228
 229	for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
 230		AT_READ_REG(hw, REG_IDLE_STATUS, &data);
 231		if ((data & modu_ctrl) == 0)
 232			return 0;
 233		msleep(1);
 234	}
 235	return data;
 236}
 237
 238/**
 239 * atl1c_phy_config - Timer Call-back
 240 * @t: timer list containing pointer to netdev cast into an unsigned long
 241 */
 242static void atl1c_phy_config(struct timer_list *t)
 243{
 244	struct atl1c_adapter *adapter = from_timer(adapter, t,
 245						   phy_config_timer);
 246	struct atl1c_hw *hw = &adapter->hw;
 247	unsigned long flags;
 248
 249	spin_lock_irqsave(&adapter->mdio_lock, flags);
 250	atl1c_restart_autoneg(hw);
 251	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 252}
 253
 254void atl1c_reinit_locked(struct atl1c_adapter *adapter)
 255{
 256	atl1c_down(adapter);
 257	atl1c_up(adapter);
 258	clear_bit(__AT_RESETTING, &adapter->flags);
 259}
 260
 261static void atl1c_check_link_status(struct atl1c_adapter *adapter)
 262{
 263	struct atl1c_hw *hw = &adapter->hw;
 264	struct net_device *netdev = adapter->netdev;
 265	struct pci_dev    *pdev   = adapter->pdev;
 266	int err;
 267	unsigned long flags;
 268	u16 speed, duplex;
 269	bool link;
 270
 271	spin_lock_irqsave(&adapter->mdio_lock, flags);
 272	link = atl1c_get_link_status(hw);
 273	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 274
 275	if (!link) {
 276		/* link down */
 277		netif_carrier_off(netdev);
 278		hw->hibernate = true;
 279		if (atl1c_reset_mac(hw) != 0)
 280			if (netif_msg_hw(adapter))
 281				dev_warn(&pdev->dev, "reset mac failed\n");
 282		atl1c_set_aspm(hw, SPEED_0);
 283		atl1c_post_phy_linkchg(hw, SPEED_0);
 284		atl1c_reset_dma_ring(adapter);
 285		atl1c_configure(adapter);
 286	} else {
 287		/* Link Up */
 288		hw->hibernate = false;
 289		spin_lock_irqsave(&adapter->mdio_lock, flags);
 290		err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
 291		spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 292		if (unlikely(err))
 293			return;
 294		/* link result is our setting */
 295		if (adapter->link_speed != speed ||
 296		    adapter->link_duplex != duplex) {
 297			adapter->link_speed  = speed;
 298			adapter->link_duplex = duplex;
 299			atl1c_set_aspm(hw, speed);
 300			atl1c_post_phy_linkchg(hw, speed);
 301			atl1c_start_mac(adapter);
 302			if (netif_msg_link(adapter))
 303				dev_info(&pdev->dev,
 304					"%s: %s NIC Link is Up<%d Mbps %s>\n",
 305					atl1c_driver_name, netdev->name,
 306					adapter->link_speed,
 307					adapter->link_duplex == FULL_DUPLEX ?
 308					"Full Duplex" : "Half Duplex");
 309		}
 310		if (!netif_carrier_ok(netdev))
 311			netif_carrier_on(netdev);
 312	}
 313}
 314
 315static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
 316{
 317	struct net_device *netdev = adapter->netdev;
 318	struct pci_dev    *pdev   = adapter->pdev;
 319	bool link;
 320
 321	spin_lock(&adapter->mdio_lock);
 322	link = atl1c_get_link_status(&adapter->hw);
 323	spin_unlock(&adapter->mdio_lock);
 324	/* notify upper layer link down ASAP */
 325	if (!link) {
 326		if (netif_carrier_ok(netdev)) {
 327			/* old link state: Up */
 328			netif_carrier_off(netdev);
 329			if (netif_msg_link(adapter))
 330				dev_info(&pdev->dev,
 331					"%s: %s NIC Link is Down\n",
 332					atl1c_driver_name, netdev->name);
 333			adapter->link_speed = SPEED_0;
 334		}
 335	}
 336
 337	set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
 338	schedule_work(&adapter->common_task);
 339}
 340
 341static void atl1c_common_task(struct work_struct *work)
 342{
 343	struct atl1c_adapter *adapter;
 344	struct net_device *netdev;
 345
 346	adapter = container_of(work, struct atl1c_adapter, common_task);
 347	netdev = adapter->netdev;
 348
 349	if (test_bit(__AT_DOWN, &adapter->flags))
 350		return;
 351
 352	if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
 353		netif_device_detach(netdev);
 354		atl1c_down(adapter);
 355		atl1c_up(adapter);
 356		netif_device_attach(netdev);
 357	}
 358
 359	if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
 360		&adapter->work_event)) {
 361		atl1c_irq_disable(adapter);
 362		atl1c_check_link_status(adapter);
 363		atl1c_irq_enable(adapter);
 364	}
 365}
 366
 367
 368static void atl1c_del_timer(struct atl1c_adapter *adapter)
 369{
 370	del_timer_sync(&adapter->phy_config_timer);
 371}
 372
 373
 374/**
 375 * atl1c_tx_timeout - Respond to a Tx Hang
 376 * @netdev: network interface device structure
 377 * @txqueue: index of hanging tx queue
 378 */
 379static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 380{
 381	struct atl1c_adapter *adapter = netdev_priv(netdev);
 382
 383	/* Do the reset outside of interrupt context */
 384	set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
 385	schedule_work(&adapter->common_task);
 386}
 387
 388/**
 389 * atl1c_set_multi - Multicast and Promiscuous mode set
 390 * @netdev: network interface device structure
 391 *
 392 * The set_multi entry point is called whenever the multicast address
 393 * list or the network interface flags are updated.  This routine is
 394 * responsible for configuring the hardware for proper multicast,
 395 * promiscuous mode, and all-multi behavior.
 396 */
 397static void atl1c_set_multi(struct net_device *netdev)
 398{
 399	struct atl1c_adapter *adapter = netdev_priv(netdev);
 400	struct atl1c_hw *hw = &adapter->hw;
 401	struct netdev_hw_addr *ha;
 402	u32 mac_ctrl_data;
 403	u32 hash_value;
 404
 405	/* Check for Promiscuous and All Multicast modes */
 406	AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
 407
 408	if (netdev->flags & IFF_PROMISC) {
 409		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
 410	} else if (netdev->flags & IFF_ALLMULTI) {
 411		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
 412		mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
 413	} else {
 414		mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
 415	}
 416
 417	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
 418
 419	/* clear the old settings from the multicast hash table */
 420	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
 421	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
 422
 423	/* comoute mc addresses' hash value ,and put it into hash table */
 424	netdev_for_each_mc_addr(ha, netdev) {
 425		hash_value = atl1c_hash_mc_addr(hw, ha->addr);
 426		atl1c_hash_set(hw, hash_value);
 427	}
 428}
 429
 430static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 431{
 432	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
 433		/* enable VLAN tag insert/strip */
 434		*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
 435	} else {
 436		/* disable VLAN tag insert/strip */
 437		*mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
 438	}
 439}
 440
 441static void atl1c_vlan_mode(struct net_device *netdev,
 442	netdev_features_t features)
 443{
 444	struct atl1c_adapter *adapter = netdev_priv(netdev);
 445	struct pci_dev *pdev = adapter->pdev;
 446	u32 mac_ctrl_data = 0;
 447
 448	if (netif_msg_pktdata(adapter))
 449		dev_dbg(&pdev->dev, "atl1c_vlan_mode\n");
 450
 451	atl1c_irq_disable(adapter);
 452	AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
 453	__atl1c_vlan_mode(features, &mac_ctrl_data);
 454	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
 455	atl1c_irq_enable(adapter);
 456}
 457
 458static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
 459{
 460	struct pci_dev *pdev = adapter->pdev;
 461
 462	if (netif_msg_pktdata(adapter))
 463		dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
 464	atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
 465}
 466
 467/**
 468 * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
 469 * @netdev: network interface device structure
 470 * @p: pointer to an address structure
 471 *
 472 * Returns 0 on success, negative on failure
 473 */
 474static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
 475{
 476	struct atl1c_adapter *adapter = netdev_priv(netdev);
 477	struct sockaddr *addr = p;
 478
 479	if (!is_valid_ether_addr(addr->sa_data))
 480		return -EADDRNOTAVAIL;
 481
 482	if (netif_running(netdev))
 483		return -EBUSY;
 484
 485	eth_hw_addr_set(netdev, addr->sa_data);
 486	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
 487
 488	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
 489
 490	return 0;
 491}
 492
 493static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
 494				struct net_device *dev)
 495{
 496	unsigned int head_size;
 497	int mtu = dev->mtu;
 498
 499	adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
 500		roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
 501
 502	head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
 503		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 504	adapter->rx_frag_size = roundup_pow_of_two(head_size);
 505}
 506
 507static netdev_features_t atl1c_fix_features(struct net_device *netdev,
 508	netdev_features_t features)
 509{
 510	struct atl1c_adapter *adapter = netdev_priv(netdev);
 511	struct atl1c_hw *hw = &adapter->hw;
 512
 513	/*
 514	 * Since there is no support for separate rx/tx vlan accel
 515	 * enable/disable make sure tx flag is always in same state as rx.
 516	 */
 517	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 518		features |= NETIF_F_HW_VLAN_CTAG_TX;
 519	else
 520		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 521
 522	if (hw->nic_type != athr_mt) {
 523		if (netdev->mtu > MAX_TSO_FRAME_SIZE)
 524			features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
 525	}
 526
 527	return features;
 528}
 529
 530static int atl1c_set_features(struct net_device *netdev,
 531	netdev_features_t features)
 532{
 533	netdev_features_t changed = netdev->features ^ features;
 534
 535	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 536		atl1c_vlan_mode(netdev, features);
 537
 538	return 0;
 539}
 540
 541static void atl1c_set_max_mtu(struct net_device *netdev)
 542{
 543	struct atl1c_adapter *adapter = netdev_priv(netdev);
 544	struct atl1c_hw *hw = &adapter->hw;
 545
 546	switch (hw->nic_type) {
 547	/* These (GbE) devices support jumbo packets, max_mtu 6122 */
 548	case athr_l1c:
 549	case athr_l1d:
 550	case athr_l1d_2:
 551		netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
 552			(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
 553		break;
 554	case athr_mt:
 555		netdev->max_mtu = 9500;
 556		break;
 557		/* The 10/100 devices don't support jumbo packets, max_mtu 1500 */
 558	default:
 559		netdev->max_mtu = ETH_DATA_LEN;
 560		break;
 561	}
 562}
 563
 564/**
 565 * atl1c_change_mtu - Change the Maximum Transfer Unit
 566 * @netdev: network interface device structure
 567 * @new_mtu: new value for maximum frame size
 568 *
 569 * Returns 0 on success, negative on failure
 570 */
 571static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
 572{
 573	struct atl1c_adapter *adapter = netdev_priv(netdev);
 574
 575	/* set MTU */
 576	if (netif_running(netdev)) {
 577		while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
 578			msleep(1);
 579		netdev->mtu = new_mtu;
 580		adapter->hw.max_frame_size = new_mtu;
 581		atl1c_set_rxbufsize(adapter, netdev);
 582		atl1c_down(adapter);
 583		netdev_update_features(netdev);
 584		atl1c_up(adapter);
 585		clear_bit(__AT_RESETTING, &adapter->flags);
 586	}
 587	return 0;
 588}
 589
 590/*
 591 *  caller should hold mdio_lock
 592 */
 593static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
 594{
 595	struct atl1c_adapter *adapter = netdev_priv(netdev);
 596	u16 result;
 597
 598	atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
 599	return result;
 600}
 601
 602static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
 603			     int reg_num, int val)
 604{
 605	struct atl1c_adapter *adapter = netdev_priv(netdev);
 606
 607	atl1c_write_phy_reg(&adapter->hw, reg_num, val);
 608}
 609
 610static int atl1c_mii_ioctl(struct net_device *netdev,
 611			   struct ifreq *ifr, int cmd)
 612{
 613	struct atl1c_adapter *adapter = netdev_priv(netdev);
 614	struct pci_dev *pdev = adapter->pdev;
 615	struct mii_ioctl_data *data = if_mii(ifr);
 616	unsigned long flags;
 617	int retval = 0;
 618
 619	if (!netif_running(netdev))
 620		return -EINVAL;
 621
 622	spin_lock_irqsave(&adapter->mdio_lock, flags);
 623	switch (cmd) {
 624	case SIOCGMIIPHY:
 625		data->phy_id = 0;
 626		break;
 627
 628	case SIOCGMIIREG:
 629		if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
 630				    &data->val_out)) {
 631			retval = -EIO;
 632			goto out;
 633		}
 634		break;
 635
 636	case SIOCSMIIREG:
 637		if (data->reg_num & ~(0x1F)) {
 638			retval = -EFAULT;
 639			goto out;
 640		}
 641
 642		dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x",
 643				data->reg_num, data->val_in);
 644		if (atl1c_write_phy_reg(&adapter->hw,
 645				     data->reg_num, data->val_in)) {
 646			retval = -EIO;
 647			goto out;
 648		}
 649		break;
 650
 651	default:
 652		retval = -EOPNOTSUPP;
 653		break;
 654	}
 655out:
 656	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
 657	return retval;
 658}
 659
 660static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 661{
 662	switch (cmd) {
 663	case SIOCGMIIPHY:
 664	case SIOCGMIIREG:
 665	case SIOCSMIIREG:
 666		return atl1c_mii_ioctl(netdev, ifr, cmd);
 667	default:
 668		return -EOPNOTSUPP;
 669	}
 670}
 671
 672/**
 673 * atl1c_alloc_queues - Allocate memory for all rings
 674 * @adapter: board private structure to initialize
 675 *
 676 */
 677static int atl1c_alloc_queues(struct atl1c_adapter *adapter)
 678{
 679	return 0;
 680}
 681
 682static enum atl1c_nic_type atl1c_get_mac_type(struct pci_dev *pdev,
 683					      u8 __iomem *hw_addr)
 684{
 685	switch (pdev->device) {
 686	case PCI_DEVICE_ID_ATTANSIC_L2C:
 687		return athr_l2c;
 688	case PCI_DEVICE_ID_ATTANSIC_L1C:
 689		return athr_l1c;
 690	case PCI_DEVICE_ID_ATHEROS_L2C_B:
 691		return athr_l2c_b;
 692	case PCI_DEVICE_ID_ATHEROS_L2C_B2:
 693		return athr_l2c_b2;
 694	case PCI_DEVICE_ID_ATHEROS_L1D:
 695		return athr_l1d;
 696	case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
 697		if (readl(hw_addr + REG_MT_MAGIC) == MT_MAGIC)
 698			return athr_mt;
 699		return athr_l1d_2;
 700	default:
 701		return athr_l1c;
 702	}
 703}
 704
 705static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
 706{
 707	u32 link_ctrl_data;
 708
 709	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
 710
 711	hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE  |
 712			 ATL1C_TXQ_MODE_ENHANCE;
 713	hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
 714			  ATL1C_ASPM_L1_SUPPORT;
 715	hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
 716
 717	if (hw->nic_type == athr_l1c ||
 718	    hw->nic_type == athr_l1d ||
 719	    hw->nic_type == athr_l1d_2)
 720		hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
 721	return 0;
 722}
 723
 724struct atl1c_platform_patch {
 725	u16 pci_did;
 726	u8  pci_revid;
 727	u16 subsystem_vid;
 728	u16 subsystem_did;
 729	u32 patch_flag;
 730#define ATL1C_LINK_PATCH	0x1
 731};
 732static const struct atl1c_platform_patch plats[] = {
 733{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
 734{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
 735{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
 736{0x2062, 0xC0, 0x1019, 0x8152, 0x1},
 737{0x2062, 0xC0, 0x1019, 0x2062, 0x1},
 738{0x2062, 0xC0, 0x1458, 0xE000, 0x1},
 739{0x2062, 0xC1, 0x1019, 0x8152, 0x1},
 740{0x2062, 0xC1, 0x1019, 0x2062, 0x1},
 741{0x2062, 0xC1, 0x1458, 0xE000, 0x1},
 742{0x2062, 0xC1, 0x1565, 0x2802, 0x1},
 743{0x2062, 0xC1, 0x1565, 0x2801, 0x1},
 744{0x1073, 0xC0, 0x1019, 0x8151, 0x1},
 745{0x1073, 0xC0, 0x1019, 0x1073, 0x1},
 746{0x1073, 0xC0, 0x1458, 0xE000, 0x1},
 747{0x1083, 0xC0, 0x1458, 0xE000, 0x1},
 748{0x1083, 0xC0, 0x1019, 0x8151, 0x1},
 749{0x1083, 0xC0, 0x1019, 0x1083, 0x1},
 750{0x1083, 0xC0, 0x1462, 0x7680, 0x1},
 751{0x1083, 0xC0, 0x1565, 0x2803, 0x1},
 752{0},
 753};
 754
 755static void atl1c_patch_assign(struct atl1c_hw *hw)
 756{
 757	struct pci_dev	*pdev = hw->adapter->pdev;
 758	u32 misc_ctrl;
 759	int i = 0;
 760
 761	hw->msi_lnkpatch = false;
 762
 763	while (plats[i].pci_did != 0) {
 764		if (plats[i].pci_did == hw->device_id &&
 765		    plats[i].pci_revid == hw->revision_id &&
 766		    plats[i].subsystem_vid == hw->subsystem_vendor_id &&
 767		    plats[i].subsystem_did == hw->subsystem_id) {
 768			if (plats[i].patch_flag & ATL1C_LINK_PATCH)
 769				hw->msi_lnkpatch = true;
 770		}
 771		i++;
 772	}
 773
 774	if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
 775	    hw->revision_id == L2CB_V21) {
 776		/* clear bit 8 of PCIE_DEV_MISC_CTRL via the indirect config access window */
 777		pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
 778				       REG_PCIE_DEV_MISC_CTRL);
 779		pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
 780		misc_ctrl &= ~0x100;
 781		pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
 782				       REG_PCIE_DEV_MISC_CTRL);
 783		pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl);
 784	}
 785}
 786/**
 787 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
 788 * @adapter: board private structure to initialize
 789 *
 790 * atl1c_sw_init initializes the Adapter private data structure.
 791 * Fields are initialized based on PCI device information and
 792 * OS network device settings (MTU size).
 793 */
 794static int atl1c_sw_init(struct atl1c_adapter *adapter)
 795{
 796	struct atl1c_hw *hw   = &adapter->hw;
 797	struct pci_dev	*pdev = adapter->pdev;
 798	u32 revision;
 799	int i;
 800
 801	adapter->wol = 0;
 802	device_set_wakeup_enable(&pdev->dev, false);
 803	adapter->link_speed = SPEED_0;
 804	adapter->link_duplex = FULL_DUPLEX;
 805	adapter->tpd_ring[0].count = 1024;
 806	adapter->rfd_ring[0].count = 512;
 807
 808	hw->vendor_id = pdev->vendor;
 809	hw->device_id = pdev->device;
 810	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 811	hw->subsystem_id = pdev->subsystem_device;
 812	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision);
 813	hw->revision_id = revision & 0xFF;
 814	/* before link up, we assume hibernate is true */
 815	hw->hibernate = true;
 816	hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
 817	if (atl1c_setup_mac_funcs(hw) != 0) {
 818		dev_err(&pdev->dev, "set mac function pointers failed\n");
 819		return -1;
 820	}
 821	atl1c_patch_assign(hw);
 822
 823	hw->intr_mask = IMR_NORMAL_MASK;
 824	for (i = 0; i < adapter->tx_queue_count; ++i)
 825		hw->intr_mask |= atl1c_qregs[i].tx_isr;
 826	for (i = 0; i < adapter->rx_queue_count; ++i)
 827		hw->intr_mask |= atl1c_qregs[i].rx_isr;
 828	hw->phy_configured = false;
 829	hw->preamble_len = 7;
 830	hw->max_frame_size = adapter->netdev->mtu;
 831	hw->autoneg_advertised = ADVERTISED_Autoneg;
 832	hw->indirect_tab = 0xE4E4E4E4;
 833	hw->base_cpu = 0;
 834
 835	hw->ict = 50000;		/* 100ms */
 836	hw->smb_timer = 200000;		/* 400ms */
 837	hw->rx_imt = 200;
 838	hw->tx_imt = 1000;
 839
 840	hw->tpd_burst = 5;
 841	hw->rfd_burst = 8;
 842	hw->dma_order = atl1c_dma_ord_out;
 843	hw->dmar_block = atl1c_dma_req_1024;
 844
 845	if (atl1c_alloc_queues(adapter)) {
 846		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 847		return -ENOMEM;
 848	}
 849	/* TODO */
 850	atl1c_set_rxbufsize(adapter, adapter->netdev);
 851	atomic_set(&adapter->irq_sem, 1);
 852	spin_lock_init(&adapter->mdio_lock);
 853	spin_lock_init(&adapter->hw.intr_mask_lock);
 854	set_bit(__AT_DOWN, &adapter->flags);
 855
 856	return 0;
 857}
 858
 859static inline void atl1c_clean_buffer(struct pci_dev *pdev,
 860				struct atl1c_buffer *buffer_info)
 861{
 862	enum dma_data_direction direction;
 863	if (buffer_info->flags & ATL1C_BUFFER_FREE)
 864		return;
 865	if (buffer_info->dma) {
 866		if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
 867			direction = DMA_FROM_DEVICE;
 868		else
 869			direction = DMA_TO_DEVICE;
 870
 871		if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
 872			dma_unmap_single(&pdev->dev, buffer_info->dma,
 873					 buffer_info->length, direction);
 874		else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
 875			dma_unmap_page(&pdev->dev, buffer_info->dma,
 876				       buffer_info->length, direction);
 877	}
 878	if (buffer_info->skb)
 879		dev_consume_skb_any(buffer_info->skb);
 880	buffer_info->dma = 0;
 881	buffer_info->skb = NULL;
 882	ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
 883}
 884/**
 885 * atl1c_clean_tx_ring - Free Tx-skb
 886 * @adapter: board private structure
 887 * @queue: idx of transmit queue
 888 */
 889static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
 890				u32 queue)
 891{
 892	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
 893	struct atl1c_buffer *buffer_info;
 894	struct pci_dev *pdev = adapter->pdev;
 895	u16 index, ring_count;
 896
 897	ring_count = tpd_ring->count;
 898	for (index = 0; index < ring_count; index++) {
 899		buffer_info = &tpd_ring->buffer_info[index];
 900		atl1c_clean_buffer(pdev, buffer_info);
 901	}
 902
 903	netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
 904
 905	/* Zero out the Tx descriptors */
 906	memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
 907		ring_count);
 908	atomic_set(&tpd_ring->next_to_clean, 0);
 909	tpd_ring->next_to_use = 0;
 910}
 911
 912/**
 913 * atl1c_clean_rx_ring - Free rx-reservation skbs
 914 * @adapter: board private structure
 915 * @queue: idx of receive queue
 916 */
 917static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
 918{
 919	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
 920	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
 921	struct atl1c_buffer *buffer_info;
 922	struct pci_dev *pdev = adapter->pdev;
 923	int j;
 924
 925	for (j = 0; j < rfd_ring->count; j++) {
 926		buffer_info = &rfd_ring->buffer_info[j];
 927		atl1c_clean_buffer(pdev, buffer_info);
 928	}
 929	/* zero out the descriptor ring */
 930	memset(rfd_ring->desc, 0, rfd_ring->size);
 931	rfd_ring->next_to_clean = 0;
 932	rfd_ring->next_to_use = 0;
 933	rrd_ring->next_to_use = 0;
 934	rrd_ring->next_to_clean = 0;
 935}
 936
 937/*
 938 * Read / Write Ptr Initialize:
 939 */
 940static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
 941{
 942	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
 943	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
 944	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
 945	struct atl1c_buffer *buffer_info;
 946	int i, j;
 947
 948	for (i = 0; i < adapter->tx_queue_count; i++) {
 949		tpd_ring[i].next_to_use = 0;
 950		atomic_set(&tpd_ring[i].next_to_clean, 0);
 951		buffer_info = tpd_ring[i].buffer_info;
 952		for (j = 0; j < tpd_ring[i].count; j++)
 953			ATL1C_SET_BUFFER_STATE(&buffer_info[j],
 954					       ATL1C_BUFFER_FREE);
 955	}
 956	for (i = 0; i < adapter->rx_queue_count; i++) {
 957		rfd_ring[i].next_to_use = 0;
 958		rfd_ring[i].next_to_clean = 0;
 959		rrd_ring[i].next_to_use = 0;
 960		rrd_ring[i].next_to_clean = 0;
 961		for (j = 0; j < rfd_ring[i].count; j++) {
 962			buffer_info = &rfd_ring[i].buffer_info[j];
 963			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
 964		}
 965	}
 966}
 967
 968/**
 969 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
 970 * @adapter: board private structure
 971 *
 972 * Free all transmit software resources
 973 */
 974static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
 975{
 976	struct pci_dev *pdev = adapter->pdev;
 977	int i;
 978
 979	dma_free_coherent(&pdev->dev, adapter->ring_header.size,
 980			  adapter->ring_header.desc, adapter->ring_header.dma);
 981	adapter->ring_header.desc = NULL;
 982
 983	/* Note: just free tpd_ring.buffer_info;
 984	 * it contains rfd_ring.buffer_info, do not double free
 985	 */
 986	if (adapter->tpd_ring[0].buffer_info) {
 987		kfree(adapter->tpd_ring[0].buffer_info);
 988		adapter->tpd_ring[0].buffer_info = NULL;
 989	}
 990	for (i = 0; i < adapter->rx_queue_count; ++i) {
 991		if (adapter->rrd_ring[i].rx_page) {
 992			put_page(adapter->rrd_ring[i].rx_page);
 993			adapter->rrd_ring[i].rx_page = NULL;
 994		}
 995	}
 996}
 997
 998/**
 999 * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources
1000 * @adapter: board private structure
1001 *
1002 * Return 0 on success, negative on failure
1003 */
1004static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1005{
1006	struct pci_dev *pdev = adapter->pdev;
1007	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
1008	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
1009	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
1010	struct atl1c_ring_header *ring_header = &adapter->ring_header;
1011	int tqc = adapter->tx_queue_count;
1012	int rqc = adapter->rx_queue_count;
1013	int size;
1014	int i;
1015	int count = 0;
1016	u32 offset = 0;
1017
1018	/* Even though only one tpd queue is actually used, the "high"
1019	 * priority tpd queue also gets initialized
1020	 */
1021	if (tqc == 1)
1022		tqc = 2;
1023
1024	for (i = 1; i < tqc; i++)
1025		tpd_ring[i].count = tpd_ring[0].count;
1026
1027	size = sizeof(struct atl1c_buffer) * (tpd_ring->count * tqc +
1028					      rfd_ring->count * rqc);
1029	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
1030	if (unlikely(!tpd_ring->buffer_info))
1031		goto err_nomem;
1032
1033	for (i = 0; i < tqc; i++) {
1034		tpd_ring[i].adapter = adapter;
1035		tpd_ring[i].num = i;
1036		tpd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
1037		count += tpd_ring[i].count;
1038	}
1039
1040	for (i = 0; i < rqc; i++) {
1041		rrd_ring[i].adapter = adapter;
1042		rrd_ring[i].num = i;
1043		rrd_ring[i].count = rfd_ring[0].count;
1044		rfd_ring[i].count = rfd_ring[0].count;
1045		rfd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
1046		count += rfd_ring->count;
1047	}
1048
1049	/*
1050	 * real ring DMA buffer
1051	 * each ring/block may need up to 8 bytes for alignment, hence the
1052	 * additional bytes tacked onto the end.
1053	 */
1054	ring_header->size =
1055		sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
1056		sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
1057		sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
1058		8 * 4;
1059
1060	ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
1061					       &ring_header->dma, GFP_KERNEL);
1062	if (unlikely(!ring_header->desc)) {
1063		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1064		goto err_nomem;
1065	}
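	/* Carve the coherent block up in order: all TPD rings first,
	 * then an RFD and an RRD ring per RX queue, each piece rounded
	 * up to an 8-byte boundary.
	 */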
1066	/* init TPD ring */
1067
1068	tpd_ring[0].dma = roundup(ring_header->dma, 8);
1069	offset = tpd_ring[0].dma - ring_header->dma;
1070	for (i = 0; i < tqc; i++) {
1071		tpd_ring[i].dma = ring_header->dma + offset;
1072		tpd_ring[i].desc = (u8 *)ring_header->desc + offset;
1073		tpd_ring[i].size =
1074			sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
1075		offset += roundup(tpd_ring[i].size, 8);
1076	}
1077	for (i = 0; i < rqc; i++) {
1078		/* init RFD ring */
1079		rfd_ring[i].dma = ring_header->dma + offset;
1080		rfd_ring[i].desc = (u8 *)ring_header->desc + offset;
1081		rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
1082			rfd_ring[i].count;
1083		offset += roundup(rfd_ring[i].size, 8);
1084
1085		/* init RRD ring */
1086		rrd_ring[i].dma = ring_header->dma + offset;
1087		rrd_ring[i].desc = (u8 *)ring_header->desc + offset;
1088		rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
1089			rrd_ring[i].count;
1090		offset += roundup(rrd_ring[i].size, 8);
1091	}
1092
1093	return 0;
1094
1095err_nomem:
1096	kfree(tpd_ring->buffer_info);
1097	return -ENOMEM;
1098}
1099
1100static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1101{
1102	struct atl1c_hw *hw = &adapter->hw;
1103	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
1104	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
1105	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
1106	int i;
1107	int tx_queue_count = adapter->tx_queue_count;
1108
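	/* Even when only one TX queue is used, the "high" priority TPD
	 * ring is still set up (see atl1c_setup_ring_resources), so
	 * program base addresses for both.
	 */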
1109	if (tx_queue_count == 1)
1110		tx_queue_count = 2;
1111
1112	/* TPD */
1113	AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
1114		     (u32)((tpd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
1115	/* just enable normal priority TX queue */
1116	for (i = 0; i < tx_queue_count; i++) {
1117		AT_WRITE_REG(hw, atl1c_qregs[i].tpd_addr_lo,
1118			     (u32)(tpd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1119	}
1120	AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
1121			(u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
1122
1123
1124	/* RFD */
1125	AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
1126		     (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1127	for (i = 0; i < adapter->rx_queue_count; i++) {
1128		AT_WRITE_REG(hw, atl1c_qregs[i].rfd_addr_lo,
1129			     (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1130	}
1131
1132	AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
1133			rfd_ring->count & RFD_RING_SIZE_MASK);
1134	AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
1135			adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
1136
1137	/* RRD */
1138	for (i = 0; i < adapter->rx_queue_count; i++) {
1139		AT_WRITE_REG(hw, atl1c_qregs[i].rrd_addr_lo,
1140			     (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1141	}
1142	AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
1143			(rrd_ring->count & RRD_RING_SIZE_MASK));
1144
1145	if (hw->nic_type == athr_l2c_b) {
1146		AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1147		AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
1148		AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
1149		AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
1150		AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
1151		AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
1152		AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0);	/* TX watermark, to enter l1 state.*/
1153		AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0);		/* RXD threshold.*/
1154	}
 1155	/* Load all of the base addresses above into the hardware */
1156	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
1157}
1158
1159static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1160{
1161	struct atl1c_hw *hw = &adapter->hw;
1162	int max_pay_load;
1163	u16 tx_offload_thresh;
1164	u32 txq_ctrl_data;
1165
1166	tx_offload_thresh = MAX_TSO_FRAME_SIZE;
1167	AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1168		(tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
1169	max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
1170	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 1171	/*
 1172	 * if BIOS has changed the dma-read-max-length to an invalid value,
 1173	 * restore it to the default value
 1174	 */
1175	if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
1176		pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
1177		hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
1178	}
1179	txq_ctrl_data =
1180		hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
1181		L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
1182
1183	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1184}
1185
1186static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1187{
1188	struct atl1c_hw *hw = &adapter->hw;
1189	u32 rxq_ctrl_data;
1190
1191	rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
1192			RXQ_RFD_BURST_NUM_SHIFT;
1193
1194	if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1195		rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1196
1197	/* aspm for gigabit */
1198	if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
1199		rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
1200			ASPM_THRUPUT_LIMIT_100M);
1201
1202	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1203}
1204
1205static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1206{
1207	struct atl1c_hw *hw = &adapter->hw;
1208	u32 dma_ctrl_data;
1209
1210	dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
1211		DMA_CTRL_RREQ_PRI_DATA |
1212		FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
1213		FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
1214		FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
1215
1216	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1217}
1218
1219/*
1220 * Stop the mac, transmit and receive units
1221 * hw - Struct containing variables accessed by shared code
1222 * return : 0  or  idle status (if error)
1223 */
1224static int atl1c_stop_mac(struct atl1c_hw *hw)
1225{
1226	u32 data;
1227
1228	AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1229	data &= ~RXQ_CTRL_EN;
1230	AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1231
1232	AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1233	data &= ~TXQ_CTRL_EN;
1234	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1235
1236	atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
1237
1238	AT_READ_REG(hw, REG_MAC_CTRL, &data);
1239	data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1240	AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1241
1242	return (int)atl1c_wait_until_idle(hw,
1243		IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
1244}
1245
1246static void atl1c_start_mac(struct atl1c_adapter *adapter)
1247{
1248	struct atl1c_hw *hw = &adapter->hw;
1249	u32 mac, txq, rxq;
1250
1251	hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX;
1252	hw->mac_speed = adapter->link_speed == SPEED_1000 ?
1253		atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
1254
1255	AT_READ_REG(hw, REG_TXQ_CTRL, &txq);
1256	AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
1257	AT_READ_REG(hw, REG_MAC_CTRL, &mac);
1258
1259	txq |= TXQ_CTRL_EN;
1260	rxq |= RXQ_CTRL_EN;
1261	mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW |
1262	       MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW |
1263	       MAC_CTRL_ADD_CRC | MAC_CTRL_PAD |
1264	       MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN |
1265	       MAC_CTRL_HASH_ALG_CRC32;
1266	if (hw->mac_duplex)
1267		mac |= MAC_CTRL_DUPLX;
1268	else
1269		mac &= ~MAC_CTRL_DUPLX;
1270	mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed);
1271	mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len);
1272
1273	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq);
1274	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
1275	AT_WRITE_REG(hw, REG_MAC_CTRL, mac);
1276}
1277
1278/*
1279 * Reset the transmit and receive units; mask and clear all interrupts.
1280 * hw - Struct containing variables accessed by shared code
1281 * return : 0  or  idle status (if error)
1282 */
1283static int atl1c_reset_mac(struct atl1c_hw *hw)
1284{
1285	struct atl1c_adapter *adapter = hw->adapter;
1286	struct pci_dev *pdev = adapter->pdev;
1287	u32 ctrl_data = 0;
1288
1289	atl1c_stop_mac(hw);
1290	/*
1291	 * Issue Soft Reset to the MAC.  This will reset the chip's
 1292	 * transmit, receive, DMA.  It will not affect
1293	 * the current PCI configuration.  The global reset bit is self-
1294	 * clearing, and should clear within a microsecond.
1295	 */
1296	AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data);
1297	ctrl_data |= MASTER_CTRL_OOB_DIS;
1298	AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST);
1299
1300	AT_WRITE_FLUSH(hw);
1301	msleep(10);
 1302	/* Wait at least 10ms for all modules to be idle */
1303
1304	if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
1305		dev_err(&pdev->dev,
 1306			"MAC state machine did not become idle within"
 1307			" 10ms of reset\n");
1308		return -1;
1309	}
1310	AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data);
1311
1312	/* driver control speed/duplex */
1313	AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data);
1314	AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW);
1315
1316	/* clk switch setting */
1317	AT_READ_REG(hw, REG_SERDES, &ctrl_data);
1318	switch (hw->nic_type) {
1319	case athr_l2c_b:
1320		ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN |
1321				SERDES_MAC_CLK_SLOWDOWN);
1322		AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1323		break;
1324	case athr_l2c_b2:
1325	case athr_l1d_2:
1326		ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN;
1327		AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1328		break;
1329	default:
1330		break;
1331	}
1332
1333	return 0;
1334}
1335
1336static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1337{
1338	u16 ctrl_flags = hw->ctrl_flags;
1339
1340	hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
1341	atl1c_set_aspm(hw, SPEED_0);
1342	hw->ctrl_flags = ctrl_flags;
1343}
1344
1345/*
1346 * Set ASPM state.
1347 * Enable/disable L0s/L1 depend on link state.
1348 */
1349static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1350{
1351	u32 pm_ctrl_data;
1352	u32 link_l1_timer;
1353
1354	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1355	pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
1356			  PM_CTRL_ASPM_L0S_EN |
1357			  PM_CTRL_MAC_ASPM_CHK);
1358	/* L1 timer */
1359	if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1360		pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
1361		link_l1_timer =
1362			link_speed == SPEED_1000 || link_speed == SPEED_100 ?
1363			L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
1364		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1365			L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
1366	} else {
1367		link_l1_timer = hw->nic_type == athr_l2c_b ?
1368			L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
1369		if (link_speed != SPEED_1000 && link_speed != SPEED_100)
1370			link_l1_timer = 1;
1371		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1372			PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
1373	}
1374
1375	/* L0S/L1 enable */
1376	if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0)
1377		pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
1378	if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1379		pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
1380
1381	/* l2cb & l1d & l2cb2 & l1d2 */
1382	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1383	    hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1384		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1385			PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
1386		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
1387				PM_CTRL_SERDES_PD_EX_L1 |
1388				PM_CTRL_CLK_SWH_L1;
1389		pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1390				  PM_CTRL_SERDES_PLL_L1_EN |
1391				  PM_CTRL_SERDES_BUFS_RX_L1_EN |
1392				  PM_CTRL_SA_DLY_EN |
1393				  PM_CTRL_HOTRST);
1394		/* disable l0s if link down or l2cb */
1395		if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
1396			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1397	} else { /* l1c */
1398		pm_ctrl_data =
1399			FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
1400		if (link_speed != SPEED_0) {
1401			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
1402					PM_CTRL_SERDES_PLL_L1_EN |
1403					PM_CTRL_SERDES_BUFS_RX_L1_EN;
1404			pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
1405					  PM_CTRL_CLK_SWH_L1 |
1406					  PM_CTRL_ASPM_L0S_EN |
1407					  PM_CTRL_ASPM_L1_EN);
1408		} else { /* link down */
1409			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1410			pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1411					  PM_CTRL_SERDES_PLL_L1_EN |
1412					  PM_CTRL_SERDES_BUFS_RX_L1_EN |
1413					  PM_CTRL_ASPM_L0S_EN);
1414		}
1415	}
1416	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1419}
1420
1421/**
1422 * atl1c_configure_mac - Configure Transmit&Receive Unit after Reset
1423 * @adapter: board private structure
1424 *
1425 * Configure the Tx /Rx unit of the MAC after a reset.
1426 */
1427static int atl1c_configure_mac(struct atl1c_adapter *adapter)
1428{
1429	struct atl1c_hw *hw = &adapter->hw;
1430	u32 master_ctrl_data = 0;
1431	u32 intr_modrt_data;
1432	u32 data;
1433
1434	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1435	master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
1436			      MASTER_CTRL_RX_ITIMER_EN |
1437			      MASTER_CTRL_INT_RDCLR);
1438	/* clear interrupt status */
1439	AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1440	/*  Clear any WOL status */
1441	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
 1442	/* set interrupt retrigger timer
 1443	 * HW re-asserts a pending interrupt to the system if software
 1444	 * has not acknowledged it within the programmed time.
 1445	 */
1446
1447	data = CLK_GATING_EN_ALL;
1448	if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
1449		if (hw->nic_type == athr_l2c_b)
1450			data &= ~CLK_GATING_RXMAC_EN;
1451	} else
1452		data = 0;
1453	AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
1454
1455	AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
1456		hw->ict & INT_RETRIG_TIMER_MASK);
1457
1458	atl1c_configure_des_ring(adapter);
1459
1460	if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) {
1461		intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) <<
1462					IRQ_MODRT_TX_TIMER_SHIFT;
1463		intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) <<
1464					IRQ_MODRT_RX_TIMER_SHIFT;
1465		AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1466		master_ctrl_data |=
1467			MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN;
1468	}
1469
1470	if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
1471		master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
1472
1473	master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1474	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1475
1476	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1477		hw->smb_timer & SMB_STAT_TIMER_MASK);
1478
1479	/* set MTU */
1480	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1481			VLAN_HLEN + ETH_FCS_LEN);
1482
1483	atl1c_configure_tx(adapter);
1484	atl1c_configure_rx(adapter);
1485	atl1c_configure_dma(adapter);
1486
1487	return 0;
1488}
1489
1490static int atl1c_configure(struct atl1c_adapter *adapter)
1491{
1492	struct net_device *netdev = adapter->netdev;
1493	int num;
1494	int i;
1495
1496	if (adapter->hw.nic_type == athr_mt) {
1497		u32 mode;
1498
1499		AT_READ_REG(&adapter->hw, REG_MT_MODE, &mode);
1500		if (adapter->rx_queue_count == 4)
1501			mode |= MT_MODE_4Q;
1502		else
1503			mode &= ~MT_MODE_4Q;
1504		AT_WRITE_REG(&adapter->hw, REG_MT_MODE, mode);
1505	}
1506
1507	atl1c_init_ring_ptrs(adapter);
1508	atl1c_set_multi(netdev);
1509	atl1c_restore_vlan(adapter);
1510
1511	for (i = 0; i < adapter->rx_queue_count; ++i) {
1512		num = atl1c_alloc_rx_buffer(adapter, i, false);
1513		if (unlikely(num == 0))
1514			return -ENOMEM;
1515	}
1516
1517	if (atl1c_configure_mac(adapter))
1518		return -EIO;
1519
1520	return 0;
1521}
1522
1523static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1524{
1525	u16 hw_reg_addr = 0;
1526	unsigned long *stats_item = NULL;
1527	u32 data;
1528
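	/* The counters in atl1c_hw_stats are assumed to be laid out in
	 * the same order as the MAC statistics registers, so both can
	 * be walked in lockstep.
	 */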
1529	/* update rx status */
1530	hw_reg_addr = REG_MAC_RX_STATUS_BIN;
1531	stats_item  = &adapter->hw_stats.rx_ok;
1532	while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
1533		AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1534		*stats_item += data;
1535		stats_item++;
1536		hw_reg_addr += 4;
1537	}
 1538	/* update tx status */
1539	hw_reg_addr = REG_MAC_TX_STATUS_BIN;
1540	stats_item  = &adapter->hw_stats.tx_ok;
1541	while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
1542		AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1543		*stats_item += data;
1544		stats_item++;
1545		hw_reg_addr += 4;
1546	}
1547}
1548
1549/**
1550 * atl1c_get_stats - Get System Network Statistics
1551 * @netdev: network interface device structure
1552 *
1553 * Returns the address of the device statistics structure.
1554 * The statistics are actually updated from the timer callback.
1555 */
1556static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
1557{
1558	struct atl1c_adapter *adapter = netdev_priv(netdev);
1559	struct atl1c_hw_stats  *hw_stats = &adapter->hw_stats;
1560	struct net_device_stats *net_stats = &netdev->stats;
1561
1562	atl1c_update_hw_stats(adapter);
1563	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
1564	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
1565	net_stats->multicast  = hw_stats->rx_mcast;
1566	net_stats->collisions = hw_stats->tx_1_col +
1567				hw_stats->tx_2_col +
1568				hw_stats->tx_late_col +
1569				hw_stats->tx_abort_col;
1570
1571	net_stats->rx_errors  = hw_stats->rx_frag +
1572				hw_stats->rx_fcs_err +
1573				hw_stats->rx_len_err +
1574				hw_stats->rx_sz_ov +
1575				hw_stats->rx_rrd_ov +
1576				hw_stats->rx_align_err +
1577				hw_stats->rx_rxf_ov;
1578
1579	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
1580	net_stats->rx_length_errors = hw_stats->rx_len_err;
1581	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
1582	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
1583	net_stats->rx_dropped       = hw_stats->rx_rrd_ov;
1584
1585	net_stats->tx_errors = hw_stats->tx_late_col +
1586			       hw_stats->tx_abort_col +
1587			       hw_stats->tx_underrun +
1588			       hw_stats->tx_trunc;
1589
1590	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
1591	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
1592	net_stats->tx_window_errors  = hw_stats->tx_late_col;
1593
1594	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
1595	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
1596
1597	return net_stats;
1598}
1599
1600static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
1601{
1602	u16 phy_data;
1603
1604	spin_lock(&adapter->mdio_lock);
1605	atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data);
1606	spin_unlock(&adapter->mdio_lock);
1607}
1608
1609static int atl1c_clean_tx(struct napi_struct *napi, int budget)
1610{
1611	struct atl1c_tpd_ring *tpd_ring =
1612		container_of(napi, struct atl1c_tpd_ring, napi);
1613	struct atl1c_adapter *adapter = tpd_ring->adapter;
1614	struct netdev_queue *txq =
1615		netdev_get_tx_queue(napi->dev, tpd_ring->num);
1616	struct atl1c_buffer *buffer_info;
1617	struct pci_dev *pdev = adapter->pdev;
1618	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1619	u16 hw_next_to_clean;
1620	unsigned int total_bytes = 0, total_packets = 0;
1621	unsigned long flags;
1622
1623	AT_READ_REGW(&adapter->hw, atl1c_qregs[tpd_ring->num].tpd_cons,
1624		     &hw_next_to_clean);
1625
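	/* hw_next_to_clean is the NIC's TPD consumer index: every
	 * descriptor before it has been DMA'd and can be reclaimed.
	 */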
1626	while (next_to_clean != hw_next_to_clean) {
1627		buffer_info = &tpd_ring->buffer_info[next_to_clean];
1628		if (buffer_info->skb) {
1629			total_bytes += buffer_info->skb->len;
1630			total_packets++;
1631		}
1632		atl1c_clean_buffer(pdev, buffer_info);
1633		if (++next_to_clean == tpd_ring->count)
1634			next_to_clean = 0;
1635		atomic_set(&tpd_ring->next_to_clean, next_to_clean);
1636	}
1637
1638	netdev_tx_completed_queue(txq, total_packets, total_bytes);
1639
1640	if (netif_tx_queue_stopped(txq) && netif_carrier_ok(adapter->netdev))
1641		netif_tx_wake_queue(txq);
1642
1643	if (total_packets < budget) {
1644		napi_complete_done(napi, total_packets);
1645		spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
1646		adapter->hw.intr_mask |= atl1c_qregs[tpd_ring->num].tx_isr;
1647		AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
1648		spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
1649		return total_packets;
1650	}
1651	return budget;
1652}
1653
1654static void atl1c_intr_rx_tx(struct atl1c_adapter *adapter, u32 status)
1655{
1656	struct atl1c_hw *hw = &adapter->hw;
1657	u32 intr_mask;
1658	int i;
1659
1660	spin_lock(&hw->intr_mask_lock);
1661	intr_mask = hw->intr_mask;
1662	for (i = 0; i < adapter->rx_queue_count; ++i) {
1663		if (!(status & atl1c_qregs[i].rx_isr))
1664			continue;
1665		if (napi_schedule_prep(&adapter->rrd_ring[i].napi)) {
1666			intr_mask &= ~atl1c_qregs[i].rx_isr;
1667			__napi_schedule(&adapter->rrd_ring[i].napi);
1668		}
1669	}
1670	for (i = 0; i < adapter->tx_queue_count; ++i) {
1671		if (!(status & atl1c_qregs[i].tx_isr))
1672			continue;
1673		if (napi_schedule_prep(&adapter->tpd_ring[i].napi)) {
1674			intr_mask &= ~atl1c_qregs[i].tx_isr;
1675			__napi_schedule(&adapter->tpd_ring[i].napi);
1676		}
1677	}
1678
1679	if (hw->intr_mask != intr_mask) {
1680		hw->intr_mask = intr_mask;
1681		AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1682	}
1683	spin_unlock(&hw->intr_mask_lock);
1684}
1685
1686/**
1687 * atl1c_intr - Interrupt Handler
1688 * @irq: interrupt number
1689 * @data: pointer to a network interface device structure
1690 */
1691static irqreturn_t atl1c_intr(int irq, void *data)
1692{
1693	struct net_device *netdev  = data;
1694	struct atl1c_adapter *adapter = netdev_priv(netdev);
1695	struct pci_dev *pdev = adapter->pdev;
1696	struct atl1c_hw *hw = &adapter->hw;
1697	int max_ints = AT_MAX_INT_WORK;
1698	int handled = IRQ_NONE;
1699	u32 status;
1700	u32 reg_data;
1701
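	/* Loop until no enabled source remains pending, for at most
	 * AT_MAX_INT_WORK passes; each pass acks the sources it saw
	 * while keeping interrupts masked via ISR_DIS_INT, and the
	 * final write of 0 below re-enables them.
	 */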
1702	do {
1703		AT_READ_REG(hw, REG_ISR, &reg_data);
1704		status = reg_data & hw->intr_mask;
1705
1706		if (status == 0 || (status & ISR_DIS_INT) != 0) {
1707			if (max_ints != AT_MAX_INT_WORK)
1708				handled = IRQ_HANDLED;
1709			break;
1710		}
1711		/* link event */
1712		if (status & ISR_GPHY)
1713			atl1c_clear_phy_int(adapter);
1714		/* Ack ISR */
1715		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
1716		if (status & (ISR_RX_PKT | ISR_TX_PKT))
1717			atl1c_intr_rx_tx(adapter, status);
1718
1719		handled = IRQ_HANDLED;
1720		/* check if PCIE PHY Link down */
1721		if (status & ISR_ERROR) {
1722			if (netif_msg_hw(adapter))
1723				dev_err(&pdev->dev,
1724					"atl1c hardware error (status = 0x%x)\n",
1725					status & ISR_ERROR);
1726			/* reset MAC */
1727			set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
1728			schedule_work(&adapter->common_task);
1729			return IRQ_HANDLED;
1730		}
1731
1732		if (status & ISR_OVER)
1733			if (netif_msg_intr(adapter))
1734				dev_warn(&pdev->dev,
1735					"TX/RX overflow (status = 0x%x)\n",
1736					status & ISR_OVER);
1737
1738		/* link event */
1739		if (status & (ISR_GPHY | ISR_MANUAL)) {
1740			netdev->stats.tx_carrier_errors++;
1741			atl1c_link_chg_event(adapter);
1742			break;
1743		}
1744
1745	} while (--max_ints > 0);
 1746	/* clear ISR_DIS_INT to re-enable interrupts */
1747	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
1748	return handled;
1749}
1750
1751static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1752		  struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
1753{
1754	if (adapter->hw.nic_type == athr_mt) {
1755		if (prrs->word3 & RRS_MT_PROT_ID_TCPUDP)
1756			skb->ip_summed = CHECKSUM_UNNECESSARY;
1757		return;
1758	}
 1759	/*
 1760	 * The pid field in the RRS is not always correct, so we
 1761	 * cannot tell whether the packet is fragmented or not;
 1762	 * report CHECKSUM_NONE and let the stack verify the checksum.
 1763	 */
1764	skb_checksum_none_assert(skb);
1765}
1766
1767static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
1768				       u32 queue, bool napi_mode)
1769{
1770	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
1771	struct sk_buff *skb;
1772	struct page *page;
1773
1774	if (adapter->rx_frag_size > PAGE_SIZE) {
1775		if (likely(napi_mode))
1776			return napi_alloc_skb(&rrd_ring->napi,
1777					      adapter->rx_buffer_len);
1778		else
1779			return netdev_alloc_skb_ip_align(adapter->netdev,
1780							 adapter->rx_buffer_len);
1781	}
1782
1783	page = rrd_ring->rx_page;
1784	if (!page) {
1785		page = alloc_page(GFP_ATOMIC);
1786		if (unlikely(!page))
1787			return NULL;
1788		rrd_ring->rx_page = page;
1789		rrd_ring->rx_page_offset = 0;
1790	}
1791
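	/* Carve the next rx_frag_size chunk out of the cached page.
	 * If the page still has room, take an extra reference for the
	 * skb and keep the page cached; otherwise the skb inherits our
	 * reference and the cache is cleared.
	 */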
1792	skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
1793			adapter->rx_frag_size);
1794	if (likely(skb)) {
1795		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1796		rrd_ring->rx_page_offset += adapter->rx_frag_size;
1797		if (rrd_ring->rx_page_offset >= PAGE_SIZE)
1798			rrd_ring->rx_page = NULL;
1799		else
1800			get_page(page);
1801	}
1802	return skb;
1803}
1804
1805static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
1806				 bool napi_mode)
1807{
1808	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
1809	struct pci_dev *pdev = adapter->pdev;
1810	struct atl1c_buffer *buffer_info, *next_info;
1811	struct sk_buff *skb;
1812	void *vir_addr = NULL;
1813	u16 num_alloc = 0;
1814	u16 rfd_next_to_use, next_next;
1815	struct atl1c_rx_free_desc *rfd_desc;
1816	dma_addr_t mapping;
1817
1818	next_next = rfd_next_to_use = rfd_ring->next_to_use;
1819	if (++next_next == rfd_ring->count)
1820		next_next = 0;
1821	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1822	next_info = &rfd_ring->buffer_info[next_next];
1823
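	/* Refill while the slot after the one being filled is free;
	 * like the TX ring, one RFD is always left unused so a full
	 * ring can be told apart from an empty one.
	 */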
1824	while (next_info->flags & ATL1C_BUFFER_FREE) {
1825		rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1826
1827		skb = atl1c_alloc_skb(adapter, queue, napi_mode);
1828		if (unlikely(!skb)) {
1829			if (netif_msg_rx_err(adapter))
1830				dev_warn(&pdev->dev, "alloc rx buffer failed\n");
1831			break;
1832		}
1833
1834		/*
1835		 * Make buffer alignment 2 beyond a 16 byte boundary
1836		 * this will result in a 16 byte aligned IP header after
1837		 * the 14 byte MAC header is removed
1838		 */
1839		vir_addr = skb->data;
1840		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1841		buffer_info->skb = skb;
1842		buffer_info->length = adapter->rx_buffer_len;
1843		mapping = dma_map_single(&pdev->dev, vir_addr,
1844					 buffer_info->length, DMA_FROM_DEVICE);
1845		if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
1846			dev_kfree_skb(skb);
1847			buffer_info->skb = NULL;
1848			buffer_info->length = 0;
1849			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
1850			netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed");
1851			break;
1852		}
1853		buffer_info->dma = mapping;
1854		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
1855			ATL1C_PCIMAP_FROMDEVICE);
1856		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1857		rfd_next_to_use = next_next;
1858		if (++next_next == rfd_ring->count)
1859			next_next = 0;
1860		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1861		next_info = &rfd_ring->buffer_info[next_next];
1862		num_alloc++;
1863	}
1864
1865	if (num_alloc) {
1866		/* TODO: update mailbox here */
1867		wmb();
1868		rfd_ring->next_to_use = rfd_next_to_use;
1869		AT_WRITE_REG(&adapter->hw, atl1c_qregs[queue].rfd_prod,
1870			     rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1871	}
1872
1873	return num_alloc;
1874}
1875
1876static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring,
1877			struct	atl1c_recv_ret_status *rrs, u16 num)
1878{
1879	u16 i;
 1880	/* RRD and RFD descriptors map one-to-one */
1881	for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring,
1882					rrd_ring->next_to_clean)) {
1883		rrs->word3 &= ~RRS_RXD_UPDATED;
1884		if (++rrd_ring->next_to_clean == rrd_ring->count)
1885			rrd_ring->next_to_clean = 0;
1886	}
1887}
1888
1889static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1890	struct atl1c_recv_ret_status *rrs, u16 num)
1891{
1892	u16 i;
1893	u16 rfd_index;
1894	struct atl1c_buffer *buffer_info = rfd_ring->buffer_info;
1895
1896	rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1897			RRS_RX_RFD_INDEX_MASK;
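	/* RFDs are consumed in order starting at the index reported in
	 * the RRS, so hand the next 'num' of them back to the free pool.
	 */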
1898	for (i = 0; i < num; i++) {
1899		buffer_info[rfd_index].skb = NULL;
1900		ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
1901					ATL1C_BUFFER_FREE);
1902		if (++rfd_index == rfd_ring->count)
1903			rfd_index = 0;
1904	}
1905	rfd_ring->next_to_clean = rfd_index;
1906}
1907
1908/**
1909 * atl1c_clean_rx - NAPI Rx polling callback
1910 * @napi: napi info
1911 * @budget: limit of packets to clean
1912 */
1913static int atl1c_clean_rx(struct napi_struct *napi, int budget)
1914{
1915	struct atl1c_rrd_ring *rrd_ring =
1916		container_of(napi, struct atl1c_rrd_ring, napi);
1917	struct atl1c_adapter *adapter = rrd_ring->adapter;
1918	u16 rfd_num, rfd_index;
1919	u16 length;
1920	struct pci_dev *pdev = adapter->pdev;
1921	struct net_device *netdev  = adapter->netdev;
1922	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[rrd_ring->num];
1923	struct sk_buff *skb;
1924	struct atl1c_recv_ret_status *rrs;
1925	struct atl1c_buffer *buffer_info;
1926	int work_done = 0;
1927	unsigned long flags;
1928
1929	/* Keep link state information with original netdev */
1930	if (!netif_carrier_ok(adapter->netdev))
1931		goto quit_polling;
1932
1933	while (1) {
1934		if (work_done >= budget)
1935			break;
1936		rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
1937		if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
1938			rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
1939				RRS_RX_RFD_CNT_MASK;
1940			if (unlikely(rfd_num != 1))
1941				/* TODO support mul rfd*/
1942				if (netif_msg_rx_err(adapter))
1943					dev_warn(&pdev->dev,
 1944						"Multiple RFDs not supported yet!\n");
1945			goto rrs_checked;
1946		} else {
1947			break;
1948		}
1949rrs_checked:
1950		atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
1951		if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
1952			atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1953			if (netif_msg_rx_err(adapter))
1954				dev_warn(&pdev->dev,
1955					 "wrong packet! rrs word3 is %x\n",
1956					 rrs->word3);
1957			continue;
1958		}
1959
1960		length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) &
1961				RRS_PKT_SIZE_MASK);
1962		/* Good Receive */
1963		if (likely(rfd_num == 1)) {
1964			rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1965					RRS_RX_RFD_INDEX_MASK;
1966			buffer_info = &rfd_ring->buffer_info[rfd_index];
1967			dma_unmap_single(&pdev->dev, buffer_info->dma,
1968					 buffer_info->length, DMA_FROM_DEVICE);
1969			skb = buffer_info->skb;
1970		} else {
1971			/* TODO */
1972			if (netif_msg_rx_err(adapter))
1973				dev_warn(&pdev->dev,
 1974					"Multiple RFDs not supported yet!\n");
1975			break;
1976		}
1977		atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1978		skb_put(skb, length - ETH_FCS_LEN);
1979		skb->protocol = eth_type_trans(skb, netdev);
1980		atl1c_rx_checksum(adapter, skb, rrs);
1981		if (rrs->word3 & RRS_VLAN_INS) {
1982			u16 vlan;
1983
1984			AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
1985			vlan = le16_to_cpu(vlan);
1986			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
1987		}
1988		napi_gro_receive(napi, skb);
1989
1990		work_done++;
1991	}
1992	if (work_done)
1993		atl1c_alloc_rx_buffer(adapter, rrd_ring->num, true);
1994
1995	if (work_done < budget) {
1996quit_polling:
1997		napi_complete_done(napi, work_done);
1998		spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
1999		adapter->hw.intr_mask |= atl1c_qregs[rrd_ring->num].rx_isr;
2000		AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
2001		spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
2002	}
2003	return work_done;
2004}
2005
2006#ifdef CONFIG_NET_POLL_CONTROLLER
2007
2008/*
2009 * Polling 'interrupt' - used by things like netconsole to send skbs
2010 * without having to re-enable interrupts. It's not called while
2011 * the interrupt routine is executing.
2012 */
2013static void atl1c_netpoll(struct net_device *netdev)
2014{
2015	struct atl1c_adapter *adapter = netdev_priv(netdev);
2016
2017	disable_irq(adapter->pdev->irq);
2018	atl1c_intr(adapter->pdev->irq, netdev);
2019	enable_irq(adapter->pdev->irq);
2020}
2021#endif
2022
2023static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, u32 queue)
2024{
2025	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
2026	u16 next_to_use = 0;
2027	u16 next_to_clean = 0;
2028
2029	next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2030	next_to_use   = tpd_ring->next_to_use;
2031
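	/* Free descriptors between the two indices, minus one: a slot
	 * is always kept empty so a full ring is distinguishable from
	 * an empty one.
	 */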
 2032	return next_to_clean > next_to_use ?
 2033		next_to_clean - next_to_use - 1 :
 2034		tpd_ring->count + next_to_clean - next_to_use - 1;
2035}
2036
2037/*
2038 * get next usable tpd
 2039 * Note: should call atl1c_tpd_avail to make sure
2040 * there is enough tpd to use
2041 */
2042static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
2043					    u32 queue)
2044{
2045	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
2046	struct atl1c_tpd_desc *tpd_desc;
2047	u16 next_to_use = 0;
2048
2049	next_to_use = tpd_ring->next_to_use;
2050	if (++tpd_ring->next_to_use == tpd_ring->count)
2051		tpd_ring->next_to_use = 0;
2052	tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use);
2053	memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc));
2054	return	tpd_desc;
2055}
2056
2057static struct atl1c_buffer *
2058atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd)
2059{
2060	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
2061
2062	return &tpd_ring->buffer_info[tpd -
2063			(struct atl1c_tpd_desc *)tpd_ring->desc];
2064}
2065
 2066/* Calculate the number of transmit packet descriptors (TPDs) needed */
2067static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
2068{
2069	u16 tpd_req;
2070	u16 proto_hdr_len = 0;
2071
2072	tpd_req = skb_shinfo(skb)->nr_frags + 1;
2073
2074	if (skb_is_gso(skb)) {
2075		proto_hdr_len = skb_tcp_all_headers(skb);
2076		if (proto_hdr_len < skb_headlen(skb))
2077			tpd_req++;
2078		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
2079			tpd_req++;
2080	}
2081	return tpd_req;
2082}
2083
2084static int atl1c_tso_csum(struct atl1c_adapter *adapter,
2085			  struct sk_buff *skb,
2086			  struct atl1c_tpd_desc **tpd,
2087			  u32 queue)
2088{
2089	struct pci_dev *pdev = adapter->pdev;
2090	unsigned short offload_type;
2091	u8 hdr_len;
2092	u32 real_len;
2093
2094	if (skb_is_gso(skb)) {
2095		int err;
2096
2097		err = skb_cow_head(skb, 0);
2098		if (err < 0)
2099			return err;
2100
2101		offload_type = skb_shinfo(skb)->gso_type;
2102
2103		if (offload_type & SKB_GSO_TCPV4) {
2104			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
2105					+ ntohs(ip_hdr(skb)->tot_len));
2106
2107			if (real_len < skb->len)
2108				pskb_trim(skb, real_len);
2109
2110			hdr_len = skb_tcp_all_headers(skb);
2111			if (unlikely(skb->len == hdr_len)) {
 2112				/* only checksum is needed */
2113				if (netif_msg_tx_queued(adapter))
2114					dev_warn(&pdev->dev,
2115						"IPV4 tso with zero data??\n");
2116				goto check_sum;
2117			} else {
2118				ip_hdr(skb)->check = 0;
2119				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
2120							ip_hdr(skb)->saddr,
2121							ip_hdr(skb)->daddr,
2122							0, IPPROTO_TCP, 0);
2123				(*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
2124			}
2125		}
2126
2127		if (offload_type & SKB_GSO_TCPV6) {
2128			struct atl1c_tpd_ext_desc *etpd =
2129				*(struct atl1c_tpd_ext_desc **)(tpd);
2130
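			/* Reuse the current TPD as an extended descriptor
			 * (large-send v2, note TPD_LSO_VER below) and fetch
			 * a fresh TPD for the packet itself.
			 */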
2131			memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
2132			*tpd = atl1c_get_tpd(adapter, queue);
2133			ipv6_hdr(skb)->payload_len = 0;
2134			/* check payload == 0 byte ? */
2135			hdr_len = skb_tcp_all_headers(skb);
2136			if (unlikely(skb->len == hdr_len)) {
 2137				/* only checksum is needed */
2138				if (netif_msg_tx_queued(adapter))
2139					dev_warn(&pdev->dev,
2140						"IPV6 tso with zero data??\n");
2141				goto check_sum;
2142			} else
2143				tcp_v6_gso_csum_prep(skb);
2144
2145			etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
2146			etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
2147			etpd->pkt_len = cpu_to_le32(skb->len);
2148			(*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
2149		}
2150
2151		(*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
2152		(*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
2153				TPD_TCPHDR_OFFSET_SHIFT;
2154		(*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
2155				TPD_MSS_SHIFT;
2156		return 0;
2157	}
2158
2159check_sum:
2160	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2161		u8 css, cso;
2162		cso = skb_checksum_start_offset(skb);
2163
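		/* The TPD carries checksum offsets in 2-byte units
		 * (note the >> 1 below), so an odd checksum start
		 * offset cannot be expressed to the hardware.
		 */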
2164		if (unlikely(cso & 0x1)) {
2165			if (netif_msg_tx_err(adapter))
2166				dev_err(&adapter->pdev->dev,
 2167					"payload offset should not be an odd number\n");
2168			return -1;
2169		} else {
2170			css = cso + skb->csum_offset;
2171
2172			(*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
2173					TPD_PLOADOFFSET_SHIFT;
2174			(*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
2175					TPD_CCSUM_OFFSET_SHIFT;
2176			(*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
2177		}
2178	}
2179	return 0;
2180}
2181
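/*
 * Unwind a partially mapped packet: unmap and clear every descriptor
 * taken since first_tpd, then rewind next_to_use to it.
 */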
2182static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
2183			      struct atl1c_tpd_desc *first_tpd,
2184			      u32 queue)
2185{
2186	struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[queue];
2187	struct atl1c_buffer *buffer_info;
2188	struct atl1c_tpd_desc *tpd;
2189	u16 first_index, index;
2190
2191	first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
2192	index = first_index;
2193	while (index != tpd_ring->next_to_use) {
2194		tpd = ATL1C_TPD_DESC(tpd_ring, index);
2195		buffer_info = &tpd_ring->buffer_info[index];
2196		atl1c_clean_buffer(adpt->pdev, buffer_info);
2197		memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
2198		if (++index == tpd_ring->count)
2199			index = 0;
2200	}
2201	tpd_ring->next_to_use = first_index;
2202}
2203
2204static int atl1c_tx_map(struct atl1c_adapter *adapter,
2205			struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
2206			u32 queue)
2207{
2208	struct atl1c_tpd_desc *use_tpd = NULL;
2209	struct atl1c_buffer *buffer_info = NULL;
2210	u16 buf_len = skb_headlen(skb);
2211	u16 map_len = 0;
2212	u16 mapped_len = 0;
2213	u16 hdr_len = 0;
2214	u16 nr_frags;
2215	u16 f;
2216	int tso;
2217
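	/* Map the packet in up to three stages: the TSO header (if
	 * any), the remainder of the linear data, then one descriptor
	 * per page fragment.
	 */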
2218	nr_frags = skb_shinfo(skb)->nr_frags;
2219	tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
2220	if (tso) {
2221		/* TSO */
2222		hdr_len = skb_tcp_all_headers(skb);
2223		map_len = hdr_len;
2224		use_tpd = tpd;
2225
2226		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2227		buffer_info->length = map_len;
2228		buffer_info->dma = dma_map_single(&adapter->pdev->dev,
2229						  skb->data, hdr_len,
2230						  DMA_TO_DEVICE);
2231		if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
2232			goto err_dma;
2233		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2234		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2235			ATL1C_PCIMAP_TODEVICE);
2236		mapped_len += map_len;
2237		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2238		use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2239	}
2240
2241	if (mapped_len < buf_len) {
 2242		/* mapped_len == 0 means we should use the first tpd,
 2243		   which is given by the caller */
2244		if (mapped_len == 0)
2245			use_tpd = tpd;
2246		else {
2247			use_tpd = atl1c_get_tpd(adapter, queue);
2248			memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2249		}
2250		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2251		buffer_info->length = buf_len - mapped_len;
2252		buffer_info->dma =
2253			dma_map_single(&adapter->pdev->dev,
2254				       skb->data + mapped_len,
2255				       buffer_info->length, DMA_TO_DEVICE);
2256		if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
2257			goto err_dma;
2258
2259		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2260		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2261			ATL1C_PCIMAP_TODEVICE);
2262		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2263		use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
2264	}
2265
2266	for (f = 0; f < nr_frags; f++) {
2267		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2268
2269		use_tpd = atl1c_get_tpd(adapter, queue);
2270		memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2271
2272		buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2273		buffer_info->length = skb_frag_size(frag);
2274		buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
2275						    frag, 0,
2276						    buffer_info->length,
2277						    DMA_TO_DEVICE);
2278		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
2279			goto err_dma;
2280
2281		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2282		ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
2283			ATL1C_PCIMAP_TODEVICE);
2284		use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2285		use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
2286	}
2287
2288	/* The last tpd */
2289	use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
 2290	/* The last buffer_info contains the skb address,
 2291	   so the skb will be freed after unmap */
2292	buffer_info->skb = skb;
2293
2294	return 0;
2295
2296err_dma:
2297	buffer_info->dma = 0;
2298	buffer_info->length = 0;
2299	return -1;
2300}
2301
2302static void atl1c_tx_queue(struct atl1c_adapter *adapter, u32 queue)
2303{
2304	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
2305
2306	AT_WRITE_REGW(&adapter->hw, atl1c_qregs[queue].tpd_prod,
2307		      tpd_ring->next_to_use);
2308}
2309
2310static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2311					  struct net_device *netdev)
2312{
2313	struct atl1c_adapter *adapter = netdev_priv(netdev);
2314	u32 queue = skb_get_queue_mapping(skb);
2315	struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue);
2316	struct atl1c_tpd_desc *tpd;
2317	u16 tpd_req;
2318
2319	if (test_bit(__AT_DOWN, &adapter->flags)) {
2320		dev_kfree_skb_any(skb);
2321		return NETDEV_TX_OK;
2322	}
2323
2324	tpd_req = atl1c_cal_tpd_req(skb);
2325
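	/* If the ring is too full, kick the hardware doorbell for what
	 * is already queued before stopping the queue.
	 */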
2326	if (atl1c_tpd_avail(adapter, queue) < tpd_req) {
 2327		/* not enough descriptors, just stop the queue */
2328		atl1c_tx_queue(adapter, queue);
2329		netif_tx_stop_queue(txq);
2330		return NETDEV_TX_BUSY;
2331	}
2332
2333	tpd = atl1c_get_tpd(adapter, queue);
2334
 2335	/* do TSO and checksum offload setup */
2336	if (atl1c_tso_csum(adapter, skb, &tpd, queue) != 0) {
2337		atl1c_tx_queue(adapter, queue);
2338		dev_kfree_skb_any(skb);
2339		return NETDEV_TX_OK;
2340	}
2341
2342	if (unlikely(skb_vlan_tag_present(skb))) {
2343		u16 vlan = skb_vlan_tag_get(skb);
2344		__le16 tag;
2345
2346		vlan = cpu_to_le16(vlan);
2347		AT_VLAN_TO_TAG(vlan, tag);
2348		tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
2349		tpd->vlan_tag = tag;
2350	}
2351
2352	if (skb_network_offset(skb) != ETH_HLEN)
2353		tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
2354
2355	if (atl1c_tx_map(adapter, skb, tpd, queue) < 0) {
2356		netif_info(adapter, tx_done, adapter->netdev,
2357			   "tx-skb dropped due to dma error\n");
2358		/* roll back tpd/buffer */
2359		atl1c_tx_rollback(adapter, tpd, queue);
2360		dev_kfree_skb_any(skb);
2361	} else {
2362		bool more = netdev_xmit_more();
2363
2364		if (__netdev_tx_sent_queue(txq, skb->len, more))
2365			atl1c_tx_queue(adapter, queue);
2366	}
2367
2368	return NETDEV_TX_OK;
2369}
2370
2371static void atl1c_free_irq(struct atl1c_adapter *adapter)
2372{
2373	struct net_device *netdev = adapter->netdev;
2374
2375	free_irq(adapter->pdev->irq, netdev);
2376
2377	if (adapter->have_msi)
2378		pci_disable_msi(adapter->pdev);
2379}
2380
2381static int atl1c_request_irq(struct atl1c_adapter *adapter)
2382{
2383	struct pci_dev    *pdev   = adapter->pdev;
2384	struct net_device *netdev = adapter->netdev;
2385	int flags = 0;
2386	int err = 0;
2387
2388	adapter->have_msi = true;
2389	err = pci_enable_msi(adapter->pdev);
2390	if (err) {
2391		if (netif_msg_ifup(adapter))
2392			dev_err(&pdev->dev,
2393				"Unable to allocate MSI interrupt Error: %d\n",
2394				err);
2395		adapter->have_msi = false;
2396	}
2397
2398	if (!adapter->have_msi)
2399		flags |= IRQF_SHARED;
2400	err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
2401			netdev->name, netdev);
2402	if (err) {
2403		if (netif_msg_ifup(adapter))
2404			dev_err(&pdev->dev,
2405				"Unable to allocate interrupt, error: %d\n",
2406				err);
2407		if (adapter->have_msi)
2408			pci_disable_msi(adapter->pdev);
2409		return err;
2410	}
2411	if (netif_msg_ifup(adapter))
2412		dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
2413	return err;
2414}
2415
2417static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
2418{
2419	int i;
2420	/* release tx-pending skbs and reset TX/RX ring indices */
2421	for (i = 0; i < adapter->tx_queue_count; ++i)
2422		atl1c_clean_tx_ring(adapter, i);
2423	for (i = 0; i < adapter->rx_queue_count; ++i)
2424		atl1c_clean_rx_ring(adapter, i);
2425}
2426
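/* Bring the interface up: program the hardware, hook up the IRQ, then
 * enable NAPI and interrupts before opening the transmit path.
 */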
2427static int atl1c_up(struct atl1c_adapter *adapter)
2428{
2429	struct net_device *netdev = adapter->netdev;
2430	int err;
2431	int i;
2432
2433	netif_carrier_off(netdev);
2434
2435	err = atl1c_configure(adapter);
2436	if (unlikely(err))
2437		goto err_up;
2438
2439	err = atl1c_request_irq(adapter);
2440	if (unlikely(err))
2441		goto err_up;
2442
2443	atl1c_check_link_status(adapter);
2444	clear_bit(__AT_DOWN, &adapter->flags);
2445	for (i = 0; i < adapter->tx_queue_count; ++i)
2446		napi_enable(&adapter->tpd_ring[i].napi);
2447	for (i = 0; i < adapter->rx_queue_count; ++i)
2448		napi_enable(&adapter->rrd_ring[i].napi);
2449	atl1c_irq_enable(adapter);
2450	netif_start_queue(netdev);
2451	return err;
2452
2453err_up:
2454	for (i = 0; i < adapter->rx_queue_count; ++i)
2455		atl1c_clean_rx_ring(adapter, i);
2456	return err;
2457}
2458
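/* Tear down in roughly the reverse order of atl1c_up(): stop the timer
 * and pending work events, mark the adapter down so the ISR backs off,
 * quiesce NAPI and interrupts, then reset the MAC to stop all RX/TX.
 */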
2459static void atl1c_down(struct atl1c_adapter *adapter)
2460{
2461	struct net_device *netdev = adapter->netdev;
2462	int i;
2463
2464	atl1c_del_timer(adapter);
2465	adapter->work_event = 0; /* clear all pending events */
2466	/* signal that we're down so the interrupt handler does not
2467	 * reschedule our watchdog timer */
2468	set_bit(__AT_DOWN, &adapter->flags);
2469	netif_carrier_off(netdev);
2470	for (i = 0; i < adapter->tx_queue_count; ++i)
2471		napi_disable(&adapter->tpd_ring[i].napi);
2472	for (i = 0; i < adapter->rx_queue_count; ++i)
2473		napi_disable(&adapter->rrd_ring[i].napi);
2474	atl1c_irq_disable(adapter);
2475	atl1c_free_irq(adapter);
2476	/* disable ASPM while the device is inactive */
2477	atl1c_disable_l0s_l1(&adapter->hw);
2478	/* reset MAC to disable all RX/TX */
2479	atl1c_reset_mac(&adapter->hw);
2480	msleep(1);
2481
2482	adapter->link_speed = SPEED_0;
2483	adapter->link_duplex = -1;
2484	atl1c_reset_dma_ring(adapter);
2485}
2486
2487/**
2488 * atl1c_open - Called when a network interface is made active
2489 * @netdev: network interface device structure
2490 *
2491 * Returns 0 on success, negative value on failure
2492 *
2493 * The open entry point is called when a network interface is made
2494 * active by the system (IFF_UP).  At this point all resources needed
2495 * for transmit and receive operations are allocated, the interrupt
2496 * handler is registered with the OS, the watchdog timer is started,
2497 * and the stack is notified that the interface is ready.
2498 */
2499static int atl1c_open(struct net_device *netdev)
2500{
2501	struct atl1c_adapter *adapter = netdev_priv(netdev);
2502	int err;
2503
2504	/* disallow open during test */
2505	if (test_bit(__AT_TESTING, &adapter->flags))
2506		return -EBUSY;
2507
2508	/* allocate rx/tx dma buffer & descriptors */
2509	err = atl1c_setup_ring_resources(adapter);
2510	if (unlikely(err))
2511		return err;
2512
2513	err = atl1c_up(adapter);
2514	if (unlikely(err))
2515		goto err_up;
2516
2517	return 0;
2518
2519err_up:
2520	atl1c_free_irq(adapter);
2521	atl1c_free_ring_resources(adapter);
2522	atl1c_reset_mac(&adapter->hw);
2523	return err;
2524}
2525
2526/**
2527 * atl1c_close - Disables a network interface
2528 * @netdev: network interface device structure
2529 *
2530 * Returns 0, this is not allowed to fail
2531 *
2532 * The close entry point is called when an interface is de-activated
2533 * by the OS.  The hardware is still under the driver's control, but
2534 * needs to be disabled.  A global MAC reset is issued to stop the
2535 * hardware, and all transmit and receive resources are freed.
2536 */
2537static int atl1c_close(struct net_device *netdev)
2538{
2539	struct atl1c_adapter *adapter = netdev_priv(netdev);
2540
2541	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2542	set_bit(__AT_DOWN, &adapter->flags);
2543	cancel_work_sync(&adapter->common_task);
2544	atl1c_down(adapter);
2545	atl1c_free_ring_resources(adapter);
2546	return 0;
2547}
2548
2549static int atl1c_suspend(struct device *dev)
2550{
2551	struct net_device *netdev = dev_get_drvdata(dev);
2552	struct atl1c_adapter *adapter = netdev_priv(netdev);
2553	struct atl1c_hw *hw = &adapter->hw;
2554	u32 wufc = adapter->wol;
2555
2556	atl1c_disable_l0s_l1(hw);
2557	if (netif_running(netdev)) {
2558		WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2559		atl1c_down(adapter);
2560	}
2561	netif_device_detach(netdev);
2562
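	/* If any Wake-on-LAN events are armed, drop the PHY to its
	 * power-saving link state before programming the wake logic.
	 */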
2563	if (wufc)
2564		if (atl1c_phy_to_ps_link(hw) != 0)
2565			dev_dbg(dev, "phy power saving failed\n");
2566
2567	atl1c_power_saving(hw, wufc);
2568
2569	return 0;
2570}
2571
2572#ifdef CONFIG_PM_SLEEP
2573static int atl1c_resume(struct device *dev)
2574{
2575	struct net_device *netdev = dev_get_drvdata(dev);
2576	struct atl1c_adapter *adapter = netdev_priv(netdev);
2577
2578	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2579	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2580
2581	atl1c_phy_reset(&adapter->hw);
2582	atl1c_reset_mac(&adapter->hw);
2583	atl1c_phy_init(&adapter->hw);
2584
2585	netif_device_attach(netdev);
2586	if (netif_running(netdev))
2587		atl1c_up(adapter);
2588
2589	return 0;
2590}
2591#endif
2592
2593static void atl1c_shutdown(struct pci_dev *pdev)
2594{
2595	struct net_device *netdev = pci_get_drvdata(pdev);
2596	struct atl1c_adapter *adapter = netdev_priv(netdev);
2597
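	/* Reuse the suspend path, then arm PCI wake if WoL is configured
	 * and drop the device into D3hot for poweroff/reboot.
	 */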
2598	atl1c_suspend(&pdev->dev);
2599	pci_wake_from_d3(pdev, adapter->wol);
2600	pci_set_power_state(pdev, PCI_D3hot);
2601}
2602
2603static const struct net_device_ops atl1c_netdev_ops = {
2604	.ndo_open		= atl1c_open,
2605	.ndo_stop		= atl1c_close,
2606	.ndo_validate_addr	= eth_validate_addr,
2607	.ndo_start_xmit		= atl1c_xmit_frame,
2608	.ndo_set_mac_address	= atl1c_set_mac_addr,
2609	.ndo_set_rx_mode	= atl1c_set_multi,
2610	.ndo_change_mtu		= atl1c_change_mtu,
2611	.ndo_fix_features	= atl1c_fix_features,
2612	.ndo_set_features	= atl1c_set_features,
2613	.ndo_eth_ioctl		= atl1c_ioctl,
2614	.ndo_tx_timeout		= atl1c_tx_timeout,
2615	.ndo_get_stats		= atl1c_get_stats,
2616#ifdef CONFIG_NET_POLL_CONTROLLER
2617	.ndo_poll_controller	= atl1c_netpoll,
2618#endif
2619};
2620
2621static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2622{
2623	SET_NETDEV_DEV(netdev, &pdev->dev);
2624	pci_set_drvdata(pdev, netdev);
2625
2626	netdev->netdev_ops = &atl1c_netdev_ops;
2627	netdev->watchdog_timeo = AT_TX_WATCHDOG;
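	/* ETH_ZLEN - (ETH_HLEN + VLAN_HLEN) = 60 - (14 + 4) = 42, the
	 * smallest MTU that still yields a minimum-size frame once the
	 * Ethernet header and a VLAN tag are accounted for.
	 */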
2628	netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
2629	atl1c_set_ethtool_ops(netdev);
2630
2631	/* TODO: add when ready */
2632	netdev->hw_features =	NETIF_F_SG		|
2633				NETIF_F_HW_CSUM		|
2634				NETIF_F_HW_VLAN_CTAG_RX	|
2635				NETIF_F_TSO		|
2636				NETIF_F_TSO6;
2637	netdev->features =	netdev->hw_features	|
2638				NETIF_F_HW_VLAN_CTAG_TX;
2639	return 0;
2640}
2641
2642/**
2643 * atl1c_probe - Device Initialization Routine
2644 * @pdev: PCI device information struct
2645 * @ent: entry in atl1c_pci_tbl
2646 *
2647 * Returns 0 on success, negative on failure
2648 *
2649 * atl1c_probe initializes an adapter identified by a pci_dev structure.
2650 * It performs the OS initialization, configures the adapter private
2651 * structure, and resets the hardware.
2652 */
2653static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2654{
2655	struct net_device *netdev;
2656	struct atl1c_adapter *adapter;
2657	static int cards_found;
2658	u8 __iomem *hw_addr;
2659	enum atl1c_nic_type nic_type;
2660	u32 queue_count = 1;
2661	int err = 0;
2662	int i;
2663
2664	/* enable device (incl. PCI PM wakeup and hotplug setup) */
2665	err = pci_enable_device_mem(pdev);
2666	if (err)
2667		return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
2668
2669	/*
2670	 * The atl1c chip can DMA to 64-bit addresses, but it uses a single
2671	 * shared register for the high 32 bits, so only a single, aligned,
2672	 * 4 GB physical address range can be used at a time.
2673	 *
2674	 * Supporting 64-bit DMA on this hardware is more trouble than it's
2675	 * worth.  It is far easier to limit to 32-bit DMA than update
2676	 * various kernel subsystems to support the mechanics required by a
2677	 * fixed-high-32-bit system.
2678	 */
2679	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2680	if (err) {
2681		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
2682		goto err_dma;
2683	}
2684
2685	err = pci_request_regions(pdev, atl1c_driver_name);
2686	if (err) {
2687		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2688		goto err_pci_reg;
2689	}
2690
2691	pci_set_master(pdev);
2692
2693	hw_addr = pci_ioremap_bar(pdev, 0);
2694	if (!hw_addr) {
2695		err = -EIO;
2696		dev_err(&pdev->dev, "cannot map device registers\n");
2697		goto err_ioremap;
2698	}
2699
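	/* Only the athr_mt variant implements the four TX/RX queue pairs
	 * described by atl1c_qregs[]; every other chip runs single-queue.
	 */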
2700	nic_type = atl1c_get_mac_type(pdev, hw_addr);
2701	if (nic_type == athr_mt)
2702		queue_count = 4;
2703
2704	netdev = alloc_etherdev_mq(sizeof(struct atl1c_adapter), queue_count);
2705	if (!netdev) {
2706		err = -ENOMEM;
2707		goto err_alloc_etherdev;
2708	}
2709
2710	err = atl1c_init_netdev(netdev, pdev);
2711	if (err) {
2712		dev_err(&pdev->dev, "init netdevice failed\n");
2713		goto err_init_netdev;
2714	}
2715	adapter = netdev_priv(netdev);
2716	adapter->bd_number = cards_found;
2717	adapter->netdev = netdev;
2718	adapter->pdev = pdev;
2719	adapter->hw.adapter = adapter;
2720	adapter->hw.nic_type = nic_type;
2721	adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
2722	adapter->hw.hw_addr = hw_addr;
2723	adapter->tx_queue_count = queue_count;
2724	adapter->rx_queue_count = queue_count;
2725
2726	/* init MII data */
2727	adapter->mii.dev = netdev;
2728	adapter->mii.mdio_read  = atl1c_mdio_read;
2729	adapter->mii.mdio_write = atl1c_mdio_write;
2730	adapter->mii.phy_id_mask = 0x1f;
2731	adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
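	/* One NAPI instance per RX ring and one per TX ring; threaded
	 * NAPI runs their poll loops in kernel threads rather than in
	 * softirq context.
	 */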
2732	dev_set_threaded(netdev, true);
2733	for (i = 0; i < adapter->rx_queue_count; ++i)
2734		netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
2735			       atl1c_clean_rx);
2736	for (i = 0; i < adapter->tx_queue_count; ++i)
2737		netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
2738				  atl1c_clean_tx);
2739	timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
2740	/* setup the private structure */
2741	err = atl1c_sw_init(adapter);
2742	if (err) {
2743		dev_err(&pdev->dev, "net device private data init failed\n");
2744		goto err_sw_init;
2745	}
2746	/* set max MTU */
2747	atl1c_set_max_mtu(netdev);
2748
2749	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2750
2751	/* Init GPHY as early as possible due to a power-saving issue */
2752	atl1c_phy_reset(&adapter->hw);
2753
2754	err = atl1c_reset_mac(&adapter->hw);
2755	if (err) {
2756		err = -EIO;
2757		goto err_reset;
2758	}
2759
2760	/* initialize the PHY so the device starts
2761	 * from a known good link state */
2762	err = atl1c_phy_init(&adapter->hw);
2763	if (err) {
2764		err = -EIO;
2765		goto err_reset;
2766	}
2767	if (atl1c_read_mac_addr(&adapter->hw)) {
2768		/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
2769		netdev->addr_assign_type = NET_ADDR_RANDOM;
2770	}
2771	eth_hw_addr_set(netdev, adapter->hw.mac_addr);
2772	if (netif_msg_probe(adapter))
2773		dev_dbg(&pdev->dev, "MAC address: %pM\n",
2774			adapter->hw.mac_addr);
2775
2776	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
2777	INIT_WORK(&adapter->common_task, atl1c_common_task);
2778	adapter->work_event = 0;
2779	err = register_netdev(netdev);
2780	if (err) {
2781		dev_err(&pdev->dev, "register netdevice failed\n");
2782		goto err_register;
2783	}
2784
2785	cards_found++;
2786	return 0;
2787
2788err_reset:
2789err_register:
2790err_sw_init:
2791err_init_netdev:
2792	free_netdev(netdev);
2793err_alloc_etherdev:
2794	iounmap(hw_addr);
2795err_ioremap:
2796	pci_release_regions(pdev);
2797err_pci_reg:
2798err_dma:
2799	pci_disable_device(pdev);
2800	return err;
2801}
2802
2803/**
2804 * atl1c_remove - Device Removal Routine
2805 * @pdev: PCI device information struct
2806 *
2807 * atl1c_remove is called by the PCI subsystem to alert the driver
2808 * that it should release a PCI device.  This could be caused by a
2809 * Hot-Plug event, or because the driver is going to be removed from
2810 * memory.
2811 */
2812static void atl1c_remove(struct pci_dev *pdev)
2813{
2814	struct net_device *netdev = pci_get_drvdata(pdev);
2815	struct atl1c_adapter *adapter = netdev_priv(netdev);
2816
2817	unregister_netdev(netdev);
2818	/* restore permanent address */
2819	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr);
2820	atl1c_phy_disable(&adapter->hw);
2821
2822	iounmap(adapter->hw.hw_addr);
2823
2824	pci_release_regions(pdev);
2825	pci_disable_device(pdev);
2826	free_netdev(netdev);
2827}
2828
2829/**
2830 * atl1c_io_error_detected - called when PCI error is detected
2831 * @pdev: Pointer to PCI device
2832 * @state: The current pci connection state
2833 *
2834 * This function is called after a PCI bus error affecting
2835 * this device has been detected.
2836 */
2837static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
2838						pci_channel_state_t state)
2839{
2840	struct net_device *netdev = pci_get_drvdata(pdev);
2841	struct atl1c_adapter *adapter = netdev_priv(netdev);
2842
2843	netif_device_detach(netdev);
2844
2845	if (state == pci_channel_io_perm_failure)
2846		return PCI_ERS_RESULT_DISCONNECT;
2847
2848	if (netif_running(netdev))
2849		atl1c_down(adapter);
2850
2851	pci_disable_device(pdev);
2852
2853	/* Request a slot reset. */
2854	return PCI_ERS_RESULT_NEED_RESET;
2855}
2856
2857/**
2858 * atl1c_io_slot_reset - called after the PCI bus has been reset.
2859 * @pdev: Pointer to PCI device
2860 *
2861 * Restart the card from scratch, as if from a cold boot. The
2862 * implementation resembles the first half of the e1000_resume routine.
2863 */
2864static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
2865{
2866	struct net_device *netdev = pci_get_drvdata(pdev);
2867	struct atl1c_adapter *adapter = netdev_priv(netdev);
2868
2869	if (pci_enable_device(pdev)) {
2870		if (netif_msg_hw(adapter))
2871			dev_err(&pdev->dev,
2872				"Cannot re-enable PCI device after reset\n");
2873		return PCI_ERS_RESULT_DISCONNECT;
2874	}
2875	pci_set_master(pdev);
2876
2877	pci_enable_wake(pdev, PCI_D3hot, 0);
2878	pci_enable_wake(pdev, PCI_D3cold, 0);
2879
2880	atl1c_reset_mac(&adapter->hw);
2881
2882	return PCI_ERS_RESULT_RECOVERED;
2883}
2884
2885/**
2886 * atl1c_io_resume - called when traffic can start flowing again.
2887 * @pdev: Pointer to PCI device
2888 *
2889 * This callback is called when the error recovery driver tells us that
2890 * it's OK to resume normal operation. The implementation resembles
2891 * the second half of the atl1c_resume routine.
2892 */
2893static void atl1c_io_resume(struct pci_dev *pdev)
2894{
2895	struct net_device *netdev = pci_get_drvdata(pdev);
2896	struct atl1c_adapter *adapter = netdev_priv(netdev);
2897
2898	if (netif_running(netdev)) {
2899		if (atl1c_up(adapter)) {
2900			if (netif_msg_hw(adapter))
2901				dev_err(&pdev->dev,
2902					"Cannot bring device back up after reset\n");
2903			return;
2904		}
2905	}
2906
2907	netif_device_attach(netdev);
2908}
2909
2910static const struct pci_error_handlers atl1c_err_handler = {
2911	.error_detected = atl1c_io_error_detected,
2912	.slot_reset = atl1c_io_slot_reset,
2913	.resume = atl1c_io_resume,
2914};
2915
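/* atl1c_suspend() is built unconditionally because atl1c_shutdown() calls
 * it directly; atl1c_resume() exists only under CONFIG_PM_SLEEP, the only
 * configuration in which SIMPLE_DEV_PM_OPS() references a resume hook.
 */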
2916static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
2917
2918static struct pci_driver atl1c_driver = {
2919	.name     = atl1c_driver_name,
2920	.id_table = atl1c_pci_tbl,
2921	.probe    = atl1c_probe,
2922	.remove   = atl1c_remove,
2923	.shutdown = atl1c_shutdown,
2924	.err_handler = &atl1c_err_handler,
2925	.driver.pm = &atl1c_pm_ops,
2926};
2927
2928module_pci_driver(atl1c_driver);