   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/* Copyright (C) 2018 Microchip Technology Inc. */
   3
   4#include <linux/module.h>
   5#include <linux/pci.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/crc32.h>
   9#include <linux/microchipphy.h>
  10#include <linux/net_tstamp.h>
  11#include <linux/of_mdio.h>
  12#include <linux/of_net.h>
  13#include <linux/phy.h>
  14#include <linux/phy_fixed.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/iopoll.h>
  17#include <linux/crc16.h>
  18#include <linux/phylink.h>
  19#include "lan743x_main.h"
  20#include "lan743x_ethtool.h"
  21
  22#define MMD_ACCESS_ADDRESS	0
  23#define MMD_ACCESS_WRITE	1
  24#define MMD_ACCESS_READ		2
  25#define MMD_ACCESS_READ_INC	3
  26#define PCS_POWER_STATE_DOWN	0x6
  27#define PCS_POWER_STATE_UP	0x4
  28
  29#define RFE_RD_FIFO_TH_3_DWORDS	0x3
  30
  31static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
  32{
  33	u32 chip_rev;
  34	u32 cfg_load;
  35	u32 hw_cfg;
  36	u32 strap;
  37	int ret;
  38
  39	/* Timeout = 100 (i.e. 1 sec (10 msec * 100)) */
  40	ret = lan743x_hs_syslock_acquire(adapter, 100);
  41	if (ret < 0) {
  42		netif_err(adapter, drv, adapter->netdev,
  43			  "Sys Lock acquire failed ret:%d\n", ret);
  44		return;
  45	}
  46
  47	cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
  48	lan743x_hs_syslock_release(adapter);
  49	hw_cfg = lan743x_csr_read(adapter, HW_CFG);
  50
  51	if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
  52	    hw_cfg & HW_CFG_RST_PROTECT_) {
  53		strap = lan743x_csr_read(adapter, STRAP_READ);
  54		if (strap & STRAP_READ_SGMII_EN_)
  55			adapter->is_sgmii_en = true;
  56		else
  57			adapter->is_sgmii_en = false;
  58	} else {
  59		chip_rev = lan743x_csr_read(adapter, FPGA_REV);
  60		if (chip_rev) {
  61			if (chip_rev & FPGA_SGMII_OP)
  62				adapter->is_sgmii_en = true;
  63			else
  64				adapter->is_sgmii_en = false;
  65		} else {
  66			adapter->is_sgmii_en = false;
  67		}
  68	}
  69	netif_dbg(adapter, drv, adapter->netdev,
  70		  "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
  71}
  72
  73static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
  74{
  75	struct lan743x_csr *csr = &adapter->csr;
  76	u32 id_rev = csr->id_rev;
  77
  78	if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
  79	    ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
  80		return true;
  81	}
  82	return false;
  83}
  84
  85static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  86{
  87	pci_release_selected_regions(adapter->pdev,
  88				     pci_select_bars(adapter->pdev,
  89						     IORESOURCE_MEM));
  90	pci_disable_device(adapter->pdev);
  91}
  92
  93static int lan743x_pci_init(struct lan743x_adapter *adapter,
  94			    struct pci_dev *pdev)
  95{
  96	unsigned long bars = 0;
  97	int ret;
  98
  99	adapter->pdev = pdev;
 100	ret = pci_enable_device_mem(pdev);
 101	if (ret)
 102		goto return_error;
 103
 104	netif_info(adapter, probe, adapter->netdev,
 105		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
 106		   pdev->vendor, pdev->device);
 107	bars = pci_select_bars(pdev, IORESOURCE_MEM);
 108	if (!test_bit(0, &bars))
 109		goto disable_device;
 110
 111	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
 112	if (ret)
 113		goto disable_device;
 114
 115	pci_set_master(pdev);
 116	return 0;
 117
 118disable_device:
 119	pci_disable_device(adapter->pdev);
 120
 121return_error:
 122	return ret;
 123}
 124
 125u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
 126{
 127	return ioread32(&adapter->csr.csr_address[offset]);
 128}
 129
 130void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
 131		       u32 data)
 132{
 133	iowrite32(data, &adapter->csr.csr_address[offset]);
 134}
 135
 136#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
 137
 138static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
 139{
 140	u32 data;
 141
 142	data = lan743x_csr_read(adapter, HW_CFG);
 143	data |= HW_CFG_LRST_;
 144	lan743x_csr_write(adapter, HW_CFG, data);
 145
 146	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
 147				  !(data & HW_CFG_LRST_), 100000, 10000000);
 148}
 149
 150static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
 151					   int offset, u32 bit_mask,
 152					   int target_value, int udelay_min,
 153					   int udelay_max, int count)
 154{
 155	u32 data;
 156
 157	return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
 158					 target_value == !!(data & bit_mask),
 159					 udelay_max, udelay_min * count);
 160}
 161
 162static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
 163				    int offset, u32 bit_mask,
 164				    int target_value, int usleep_min,
 165				    int usleep_max, int count)
 166{
 167	u32 data;
 168
 169	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
 170				  target_value == !!(data & bit_mask),
 171				  usleep_max, usleep_min * count);
 172}
 173
 174static int lan743x_csr_init(struct lan743x_adapter *adapter)
 175{
 176	struct lan743x_csr *csr = &adapter->csr;
 177	resource_size_t bar_start, bar_length;
 178
 179	bar_start = pci_resource_start(adapter->pdev, 0);
 180	bar_length = pci_resource_len(adapter->pdev, 0);
 181	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
 182					bar_start, bar_length);
 183	if (!csr->csr_address)
 184		return -ENOMEM;
 185
 186	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
 187	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
 188	netif_info(adapter, probe, adapter->netdev,
 189		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
 190		   csr->id_rev,	FPGA_REV_GET_MAJOR_(csr->fpga_rev),
 191		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
 192	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev))
 193		return -ENODEV;
 194
 195	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 196	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
 197	case ID_REV_CHIP_REV_A0_:
 198		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
 199		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 200		break;
 201	case ID_REV_CHIP_REV_B0_:
 202		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
 203		break;
 204	}
 205
 206	return lan743x_csr_light_reset(adapter);
 207}
 208
 209static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
 210{
 211	struct lan743x_intr *intr = &adapter->intr;
 212
 213	/* disable the interrupt to prevent repeated re-triggering */
 214	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 215	intr->software_isr_flag = true;
 216	wake_up(&intr->software_isr_wq);
 217}
 218
 219static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
 220{
 221	struct lan743x_tx *tx = context;
 222	struct lan743x_adapter *adapter = tx->adapter;
 223	bool enable_flag = true;
 224
 225	lan743x_csr_read(adapter, INT_EN_SET);
 226	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 227		lan743x_csr_write(adapter, INT_EN_CLR,
 228				  INT_BIT_DMA_TX_(tx->channel_number));
 229	}
 230
 231	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
 232		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
 233		u32 dmac_int_sts;
 234		u32 dmac_int_en;
 235
 236		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 237			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 238		else
 239			dmac_int_sts = ioc_bit;
 240		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 241			dmac_int_en = lan743x_csr_read(adapter,
 242						       DMAC_INT_EN_SET);
 243		else
 244			dmac_int_en = ioc_bit;
 245
 246		dmac_int_en &= ioc_bit;
 247		dmac_int_sts &= dmac_int_en;
 248		if (dmac_int_sts & ioc_bit) {
 249			napi_schedule(&tx->napi);
 250			enable_flag = false;/* poll func will enable later */
 251		}
 252	}
 253
 254	if (enable_flag)
 255		/* enable isr */
 256		lan743x_csr_write(adapter, INT_EN_SET,
 257				  INT_BIT_DMA_TX_(tx->channel_number));
 258}
 259
 260static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
 261{
 262	struct lan743x_rx *rx = context;
 263	struct lan743x_adapter *adapter = rx->adapter;
 264	bool enable_flag = true;
 265
 266	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 267		lan743x_csr_write(adapter, INT_EN_CLR,
 268				  INT_BIT_DMA_RX_(rx->channel_number));
 269	}
 270
 271	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
 272		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
 273		u32 dmac_int_sts;
 274		u32 dmac_int_en;
 275
 276		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 277			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 278		else
 279			dmac_int_sts = rx_frame_bit;
 280		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 281			dmac_int_en = lan743x_csr_read(adapter,
 282						       DMAC_INT_EN_SET);
 283		else
 284			dmac_int_en = rx_frame_bit;
 285
 286		dmac_int_en &= rx_frame_bit;
 287		dmac_int_sts &= dmac_int_en;
 288		if (dmac_int_sts & rx_frame_bit) {
 289			napi_schedule(&rx->napi);
 290			enable_flag = false;/* poll funct will enable later */
 291		}
 292	}
 293
 294	if (enable_flag) {
 295		/* enable isr */
 296		lan743x_csr_write(adapter, INT_EN_SET,
 297				  INT_BIT_DMA_RX_(rx->channel_number));
 298	}
 299}
 300
 301static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
 302{
 303	struct lan743x_adapter *adapter = context;
 304	unsigned int channel;
 305
 306	if (int_sts & INT_BIT_ALL_RX_) {
 307		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
 308			channel++) {
 309			u32 int_bit = INT_BIT_DMA_RX_(channel);
 310
 311			if (int_sts & int_bit) {
 312				lan743x_rx_isr(&adapter->rx[channel],
 313					       int_bit, flags);
 314				int_sts &= ~int_bit;
 315			}
 316		}
 317	}
 318	if (int_sts & INT_BIT_ALL_TX_) {
 319		for (channel = 0; channel < adapter->used_tx_channels;
 320			channel++) {
 321			u32 int_bit = INT_BIT_DMA_TX_(channel);
 322
 323			if (int_sts & int_bit) {
 324				lan743x_tx_isr(&adapter->tx[channel],
 325					       int_bit, flags);
 326				int_sts &= ~int_bit;
 327			}
 328		}
 329	}
 330	if (int_sts & INT_BIT_ALL_OTHER_) {
 331		if (int_sts & INT_BIT_SW_GP_) {
 332			lan743x_intr_software_isr(adapter);
 333			int_sts &= ~INT_BIT_SW_GP_;
 334		}
 335		if (int_sts & INT_BIT_1588_) {
 336			lan743x_ptp_isr(adapter);
 337			int_sts &= ~INT_BIT_1588_;
 338		}
 339	}
 340	if (int_sts)
 341		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
 342}
 343
 344static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
 345{
 346	struct lan743x_vector *vector = ptr;
 347	struct lan743x_adapter *adapter = vector->adapter;
 348	irqreturn_t result = IRQ_NONE;
 349	u32 int_enables;
 350	u32 int_sts;
 351
 352	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
 353		int_sts = lan743x_csr_read(adapter, INT_STS);
 354	} else if (vector->flags &
 355		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
 356		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
 357		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
 358	} else {
 359		/* use mask as implied status */
 360		int_sts = vector->int_mask | INT_BIT_MAS_;
 361	}
 362
 363	if (!(int_sts & INT_BIT_MAS_))
 364		goto irq_done;
 365
 366	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
 367		/* disable vector interrupt */
 368		lan743x_csr_write(adapter,
 369				  INT_VEC_EN_CLR,
 370				  INT_VEC_EN_(vector->vector_index));
 371
 372	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
 373		/* disable master interrupt */
 374		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 375
 376	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
 377		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
 378	} else {
 379		/*  use vector mask as implied enable mask */
 380		int_enables = vector->int_mask;
 381	}
 382
 383	int_sts &= int_enables;
 384	int_sts &= vector->int_mask;
 385	if (int_sts) {
 386		if (vector->handler) {
 387			vector->handler(vector->context,
 388					int_sts, vector->flags);
 389		} else {
 390			/* disable interrupts on this vector */
 391			lan743x_csr_write(adapter, INT_EN_CLR,
 392					  vector->int_mask);
 393		}
 394		result = IRQ_HANDLED;
 395	}
 396
 397	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
 398		/* enable master interrupt */
 399		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 400
 401	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
 402		/* enable vector interrupt */
 403		lan743x_csr_write(adapter,
 404				  INT_VEC_EN_SET,
 405				  INT_VEC_EN_(vector->vector_index));
 406irq_done:
 407	return result;
 408}
 409
 410static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
 411{
 412	struct lan743x_intr *intr = &adapter->intr;
 413	int ret;
 414
 415	intr->software_isr_flag = false;
 416
 417	/* enable and activate test interrupt */
 418	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
 419	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
 420
 421	ret = wait_event_timeout(intr->software_isr_wq,
 422				 intr->software_isr_flag,
 423				 msecs_to_jiffies(200));
 424
 425	/* disable test interrupt */
 426	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 427
 428	return ret > 0 ? 0 : -ENODEV;
 429}
 430
 431static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
 432				     int vector_index, u32 flags,
 433				     u32 int_mask,
 434				     lan743x_vector_handler handler,
 435				     void *context)
 436{
 437	struct lan743x_vector *vector = &adapter->intr.vector_list
 438					[vector_index];
 439	int ret;
 440
 441	vector->adapter = adapter;
 442	vector->flags = flags;
 443	vector->vector_index = vector_index;
 444	vector->int_mask = int_mask;
 445	vector->handler = handler;
 446	vector->context = context;
 447
 448	ret = request_irq(vector->irq,
 449			  lan743x_intr_entry_isr,
 450			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
 451			  IRQF_SHARED : 0, DRIVER_NAME, vector);
 452	if (ret) {
 453		vector->handler = NULL;
 454		vector->context = NULL;
 455		vector->int_mask = 0;
 456		vector->flags = 0;
 457	}
 458	return ret;
 459}
 460
 461static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
 462					int vector_index)
 463{
 464	struct lan743x_vector *vector = &adapter->intr.vector_list
 465					[vector_index];
 466
 467	free_irq(vector->irq, vector);
 468	vector->handler = NULL;
 469	vector->context = NULL;
 470	vector->int_mask = 0;
 471	vector->flags = 0;
 472}
 473
 474static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
 475					 u32 int_mask)
 476{
 477	int index;
 478
 479	for (index = 0; index < adapter->max_vector_count; index++) {
 480		if (adapter->intr.vector_list[index].int_mask & int_mask)
 481			return adapter->intr.vector_list[index].flags;
 482	}
 483	return 0;
 484}
 485
 486static void lan743x_intr_close(struct lan743x_adapter *adapter)
 487{
 488	struct lan743x_intr *intr = &adapter->intr;
 489	int index = 0;
 490
 491	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 492	if (adapter->is_pci11x1x)
 493		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
 494	else
 495		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
 496
 497	for (index = 0; index < intr->number_of_vectors; index++) {
 498		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
 499			lan743x_intr_unregister_isr(adapter, index);
 500			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
 501		}
 502	}
 503
 504	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
 505		pci_disable_msi(adapter->pdev);
 506		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
 507	}
 508
 509	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
 510		pci_disable_msix(adapter->pdev);
 511		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
 512	}
 513}
 514
 515static int lan743x_intr_open(struct lan743x_adapter *adapter)
 516{
 517	struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
 518	struct lan743x_intr *intr = &adapter->intr;
 519	unsigned int used_tx_channels;
 520	u32 int_vec_en_auto_clr = 0;
 521	u8 max_vector_count;
 522	u32 int_vec_map0 = 0;
 523	u32 int_vec_map1 = 0;
 524	int ret = -ENODEV;
 525	int index = 0;
 526	u32 flags = 0;
 527
 528	intr->number_of_vectors = 0;
 529
 530	/* Try to set up MSIX interrupts */
 531	max_vector_count = adapter->max_vector_count;
 532	memset(&msix_entries[0], 0,
 533	       sizeof(struct msix_entry) * max_vector_count);
 534	for (index = 0; index < max_vector_count; index++)
 535		msix_entries[index].entry = index;
 536	used_tx_channels = adapter->used_tx_channels;
 537	ret = pci_enable_msix_range(adapter->pdev,
 538				    msix_entries, 1,
 539				    1 + used_tx_channels +
 540				    LAN743X_USED_RX_CHANNELS);
 541
 542	if (ret > 0) {
 543		intr->flags |= INTR_FLAG_MSIX_ENABLED;
 544		intr->number_of_vectors = ret;
 545		intr->using_vectors = true;
 546		for (index = 0; index < intr->number_of_vectors; index++)
 547			intr->vector_list[index].irq = msix_entries
 548						       [index].vector;
 549		netif_info(adapter, ifup, adapter->netdev,
 550			   "using MSIX interrupts, number of vectors = %d\n",
 551			   intr->number_of_vectors);
 552	}
 553
 554	/* If MSIX failed, try to set up using MSI interrupts */
 555	if (!intr->number_of_vectors) {
 556		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 557			if (!pci_enable_msi(adapter->pdev)) {
 558				intr->flags |= INTR_FLAG_MSI_ENABLED;
 559				intr->number_of_vectors = 1;
 560				intr->using_vectors = true;
 561				intr->vector_list[0].irq =
 562					adapter->pdev->irq;
 563				netif_info(adapter, ifup, adapter->netdev,
 564					   "using MSI interrupts, number of vectors = %d\n",
 565					   intr->number_of_vectors);
 566			}
 567		}
 568	}
 569
 570	/* If MSIX and MSI failed, set up using legacy interrupt */
 571	if (!intr->number_of_vectors) {
 572		intr->number_of_vectors = 1;
 573		intr->using_vectors = false;
 574		intr->vector_list[0].irq = intr->irq;
 575		netif_info(adapter, ifup, adapter->netdev,
 576			   "using legacy interrupts\n");
 577	}
 578
 579	/* At this point we must have at least one irq */
 580	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
 581
 582	/* map all interrupts to vector 0 */
 583	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
 584	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
 585	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
 586	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 587		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 588		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 589		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 590
 591	if (intr->using_vectors) {
 592		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 593			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 594	} else {
 595		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
 596			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
 597			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
 598	}
 599
 600	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 601		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
 602		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
 603		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 604		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
 605		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
 606		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
 607	}
 608
 609	init_waitqueue_head(&intr->software_isr_wq);
 610
 611	ret = lan743x_intr_register_isr(adapter, 0, flags,
 612					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
 613					INT_BIT_ALL_OTHER_,
 614					lan743x_intr_shared_isr, adapter);
 615	if (ret)
 616		goto clean_up;
 617	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
 618
 619	if (intr->using_vectors)
 620		lan743x_csr_write(adapter, INT_VEC_EN_SET,
 621				  INT_VEC_EN_(0));
 622
 623	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 624		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
 625		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
 626		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
 627		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
 628		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
 629		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
 630		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
 631		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
 632		if (adapter->is_pci11x1x) {
 633			lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
 634			lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
 635			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
 636			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
 637		} else {
 638			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
 639			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
 640		}
 641		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
 642	}
 643
 644	/* enable interrupts */
 645	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 646	ret = lan743x_intr_test_isr(adapter);
 647	if (ret)
 648		goto clean_up;
 649
 650	if (intr->number_of_vectors > 1) {
 651		int number_of_tx_vectors = intr->number_of_vectors - 1;
 652
 653		if (number_of_tx_vectors > used_tx_channels)
 654			number_of_tx_vectors = used_tx_channels;
 655		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 656			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 657			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 658			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 659			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 660			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 661
 662		if (adapter->csr.flags &
 663		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 664			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 665				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 666				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 667				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 668		}
 669
 670		for (index = 0; index < number_of_tx_vectors; index++) {
 671			u32 int_bit = INT_BIT_DMA_TX_(index);
 672			int vector = index + 1;
 673
 674			/* map TX interrupt to vector */
 675			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 676			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
 677
 678			/* Remove TX interrupt from shared mask */
 679			intr->vector_list[0].int_mask &= ~int_bit;
 680			ret = lan743x_intr_register_isr(adapter, vector, flags,
 681							int_bit, lan743x_tx_isr,
 682							&adapter->tx[index]);
 683			if (ret)
 684				goto clean_up;
 685			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 686			if (!(flags &
 687			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
 688				lan743x_csr_write(adapter, INT_VEC_EN_SET,
 689						  INT_VEC_EN_(vector));
 690		}
 691	}
 692	if ((intr->number_of_vectors - used_tx_channels) > 1) {
 693		int number_of_rx_vectors = intr->number_of_vectors -
 694						used_tx_channels - 1;
 695
 696		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
 697			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
 698
 699		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 700			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 701			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 702			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 703			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 704			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 705
 706		if (adapter->csr.flags &
 707		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 708			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
 709				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 710				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 711				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 712				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 713		}
 714		for (index = 0; index < number_of_rx_vectors; index++) {
 715			int vector = index + 1 + used_tx_channels;
 716			u32 int_bit = INT_BIT_DMA_RX_(index);
 717
 718			/* map RX interrupt to vector */
 719			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
 720			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
 721			if (flags &
 722			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
 723				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
 724				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
 725						  int_vec_en_auto_clr);
 726			}
 727
 728			/* Remove RX interrupt from shared mask */
 729			intr->vector_list[0].int_mask &= ~int_bit;
 730			ret = lan743x_intr_register_isr(adapter, vector, flags,
 731							int_bit, lan743x_rx_isr,
 732							&adapter->rx[index]);
 733			if (ret)
 734				goto clean_up;
 735			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 736
 737			lan743x_csr_write(adapter, INT_VEC_EN_SET,
 738					  INT_VEC_EN_(vector));
 739		}
 740	}
 741	return 0;
 742
 743clean_up:
 744	lan743x_intr_close(adapter);
 745	return ret;
 746}
 747
 748static int lan743x_dp_write(struct lan743x_adapter *adapter,
 749			    u32 select, u32 addr, u32 length, u32 *buf)
 750{
 751	u32 dp_sel;
 752	int i;
 753
 754	if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
 755					    1, 40, 100, 100))
 756		return -EIO;
 757	dp_sel = lan743x_csr_read(adapter, DP_SEL);
 758	dp_sel &= ~DP_SEL_MASK_;
 759	dp_sel |= select;
 760	lan743x_csr_write(adapter, DP_SEL, dp_sel);
 761
 762	for (i = 0; i < length; i++) {
 763		lan743x_csr_write(adapter, DP_ADDR, addr + i);
 764		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
 765		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
 766		if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
 767						    DP_SEL_DPRDY_,
 768						    1, 40, 100, 100))
 769			return -EIO;
 770	}
 771
 772	return 0;
 773}
 774
 775static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
 776{
 777	u32 ret;
 778
 779	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 780		MAC_MII_ACC_PHY_ADDR_MASK_;
 781	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
 782		MAC_MII_ACC_MIIRINDA_MASK_;
 783
 784	if (read)
 785		ret |= MAC_MII_ACC_MII_READ_;
 786	else
 787		ret |= MAC_MII_ACC_MII_WRITE_;
 788	ret |= MAC_MII_ACC_MII_BUSY_;
 789
 790	return ret;
 791}
 792
 793static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
 794{
 795	u32 data;
 796
 797	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
 798				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
 799}
 800
 801static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
 802{
 803	struct lan743x_adapter *adapter = bus->priv;
 804	u32 val, mii_access;
 805	int ret;
 806
 807	/* confirm MII not busy */
 808	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 809	if (ret < 0)
 810		return ret;
 811
 812	/* set the address, index & direction (read from PHY) */
 813	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
 814	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 815	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 816	if (ret < 0)
 817		return ret;
 818
 819	val = lan743x_csr_read(adapter, MAC_MII_DATA);
 820	return (int)(val & 0xFFFF);
 821}
 822
 823static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
 824				     int phy_id, int index, u16 regval)
 825{
 826	struct lan743x_adapter *adapter = bus->priv;
 827	u32 val, mii_access;
 828	int ret;
 829
 830	/* confirm MII not busy */
 831	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 832	if (ret < 0)
 833		return ret;
 834	val = (u32)regval;
 835	lan743x_csr_write(adapter, MAC_MII_DATA, val);
 836
 837	/* set the address, index & direction (write to PHY) */
 838	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
 839	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 840	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 841	return ret;
 842}
 843
 844static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
 845{
 846	u32 ret;
 847
 848	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 849		MAC_MII_ACC_PHY_ADDR_MASK_;
 850	ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
 851		MAC_MII_ACC_MIIMMD_MASK_;
 852	if (op == MMD_ACCESS_WRITE)
 853		ret |= MAC_MII_ACC_MIICMD_WRITE_;
 854	else if (op == MMD_ACCESS_READ)
 855		ret |= MAC_MII_ACC_MIICMD_READ_;
 856	else if (op == MMD_ACCESS_READ_INC)
 857		ret |= MAC_MII_ACC_MIICMD_READ_INC_;
 858	else
 859		ret |= MAC_MII_ACC_MIICMD_ADDR_;
 860	ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
 861
 862	return ret;
 863}
 864
 865static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
 866				    int dev_addr, int index)
 867{
 868	struct lan743x_adapter *adapter = bus->priv;
 869	u32 mmd_access;
 870	int ret;
 871
 872	/* confirm MII not busy */
 873	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 874	if (ret < 0)
 875		return ret;
 876
 877	/* Load Register Address */
 878	lan743x_csr_write(adapter, MAC_MII_DATA, index);
 879	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 880					    MMD_ACCESS_ADDRESS);
 881	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 882	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 883	if (ret < 0)
 884		return ret;
 885
 886	/* Read Data */
 887	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 888					    MMD_ACCESS_READ);
 889	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 890	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 891	if (ret < 0)
 892		return ret;
 893
 894	ret = lan743x_csr_read(adapter, MAC_MII_DATA);
 895	return (int)(ret & 0xFFFF);
 896}
 897
 898static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
 899				     int dev_addr, int index, u16 regval)
 900{
 901	struct lan743x_adapter *adapter = bus->priv;
 902	u32 mmd_access;
 903	int ret;
 904
 905	/* confirm MII not busy */
 906	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 907	if (ret < 0)
 908		return ret;
 909
 910	/* Load Register Address */
 911	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
 912	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 913					    MMD_ACCESS_ADDRESS);
 914	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 915	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 916	if (ret < 0)
 917		return ret;
 918
 919	/* Write Data */
 920	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
 921	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 922					    MMD_ACCESS_WRITE);
 923	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 924
 925	return lan743x_mac_mii_wait_till_not_busy(adapter);
 926}
 927
 928static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
 929{
 930	u32 data;
 931	int ret;
 932
 933	ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
 934				 !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
 935	if (ret < 0)
 936		netif_err(adapter, drv, adapter->netdev,
 937			  "%s: error %d sgmii wait timeout\n", __func__, ret);
 938
 939	return ret;
 940}
 941
 942int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
 943{
 944	u32 mmd_access;
 945	int ret;
 946	u32 val;
 947
 948	if (mmd > 31) {
 949		netif_err(adapter, probe, adapter->netdev,
 950			  "%s mmd should <= 31\n", __func__);
 951		return -EINVAL;
 952	}
 953
 954	mutex_lock(&adapter->sgmii_rw_lock);
 955	/* Load Register Address */
 956	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
 957	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
 958	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
 959	ret = lan743x_sgmii_wait_till_not_busy(adapter);
 960	if (ret < 0)
 961		goto sgmii_unlock;
 962
 963	val = lan743x_csr_read(adapter, SGMII_DATA);
 964	ret = (int)(val & SGMII_DATA_MASK_);
 965
 966sgmii_unlock:
 967	mutex_unlock(&adapter->sgmii_rw_lock);
 968
 969	return ret;
 970}
 971
 972static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
 973			       u8 mmd, u16 addr, u16 val)
 974{
 975	u32 mmd_access;
 976	int ret;
 977
 978	if (mmd > 31) {
 979		netif_err(adapter, probe, adapter->netdev,
 980			  "%s mmd should <= 31\n", __func__);
 981		return -EINVAL;
 982	}
 983	mutex_lock(&adapter->sgmii_rw_lock);
 984	/* Load Register Data */
 985	lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
 986	/* Load Register Address */
 987	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
 988	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
 989	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
 990	ret = lan743x_sgmii_wait_till_not_busy(adapter);
 991	mutex_unlock(&adapter->sgmii_rw_lock);
 992
 993	return ret;
 994}
 995
 996static int lan743x_get_lsd(int speed, int duplex, u8 mss)
 997{
 998	int lsd;
 999
1000	switch (speed) {
1001	case SPEED_2500:
1002		if (mss == MASTER_SLAVE_STATE_SLAVE)
1003			lsd = LINK_2500_SLAVE;
1004		else
1005			lsd = LINK_2500_MASTER;
1006		break;
1007	case SPEED_1000:
1008		if (mss == MASTER_SLAVE_STATE_SLAVE)
1009			lsd = LINK_1000_SLAVE;
1010		else
1011			lsd = LINK_1000_MASTER;
1012		break;
1013	case SPEED_100:
1014		if (duplex == DUPLEX_FULL)
1015			lsd = LINK_100FD;
1016		else
1017			lsd = LINK_100HD;
1018		break;
1019	case SPEED_10:
1020		if (duplex == DUPLEX_FULL)
1021			lsd = LINK_10FD;
1022		else
1023			lsd = LINK_10HD;
1024		break;
1025	default:
1026		lsd = -EINVAL;
1027	}
1028
1029	return lsd;
1030}
1031
1032static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
1033				  u16 baud)
1034{
1035	int mpllctrl0;
1036	int mpllctrl1;
1037	int miscctrl1;
1038	int ret;
1039
1040	mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1041				       VR_MII_GEN2_4_MPLL_CTRL0);
1042	if (mpllctrl0 < 0)
1043		return mpllctrl0;
1044
1045	mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
1046	if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
1047		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
1048		/* mpll_baud_clk/4 */
1049		miscctrl1 = 0xA;
1050	} else {
1051		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
1052		/* mpll_baud_clk/2 */
1053		miscctrl1 = 0x5;
1054	}
1055
1056	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1057				  VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
1058	if (ret < 0)
1059		return ret;
1060
1061	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1062				  VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
1063	if (ret < 0)
1064		return ret;
1065
1066	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1067				  VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
1068}
1069
1070static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
1071				       bool enable)
1072{
1073	if (enable)
1074		return lan743x_sgmii_mpll_set(adapter,
1075					      VR_MII_BAUD_RATE_3P125GBPS);
1076	else
1077		return lan743x_sgmii_mpll_set(adapter,
1078					      VR_MII_BAUD_RATE_1P25GBPS);
1079}
1080
1081static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter)
1082{
1083	enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
1084	int mii_ctrl;
1085	int dgt_ctrl;
1086	int an_ctrl;
1087	int ret;
1088
1089	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
1090		/* Switch to 2.5 Gbps */
1091		ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
1092	else
1093		/* Switch to 10/100/1000 Mbps clock */
1094		ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
1095	if (ret < 0)
1096		return ret;
1097
1098	/* Enable SGMII Auto NEG */
1099	mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
1100	if (mii_ctrl < 0)
1101		return mii_ctrl;
1102
1103	an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
1104	if (an_ctrl < 0)
1105		return an_ctrl;
1106
1107	dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1108				      VR_MII_DIG_CTRL1);
1109	if (dgt_ctrl < 0)
1110		return dgt_ctrl;
1111
1112	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
1113		mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
1114		mii_ctrl |= BMCR_SPEED1000;
1115		dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
1116		dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
1117		/* In order for Auto-Negotiation to operate properly at
1118		 * 2.5 Gbps the 1.6ms link timer values must be adjusted
1119		 * The VR_MII_LINK_TIMER_CTRL Register must be set to
1120		 * 16'h7A1 and The CL37_TMR_OVR_RIDE bit of the
1121		 * VR_MII_DIG_CTRL1 Register set to 1
1122		 */
1123		ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1124					  VR_MII_LINK_TIMER_CTRL, 0x7A1);
1125		if (ret < 0)
1126			return ret;
1127	} else {
1128		mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1129		an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
1130		dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
1131		dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
1132	}
1133
1134	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
1135				  mii_ctrl);
1136	if (ret < 0)
1137		return ret;
1138
1139	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1140				  VR_MII_DIG_CTRL1, dgt_ctrl);
1141	if (ret < 0)
1142		return ret;
1143
1144	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1145				  VR_MII_AN_CTRL, an_ctrl);
1146}
1147
1148static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
1149{
1150	u8 wait_cnt = 0;
1151	u32 dig_sts;
1152
1153	do {
1154		dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1155					     VR_MII_DIG_STS);
1156		if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
1157		      VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
1158			break;
1159		usleep_range(1000, 2000);
1160	} while (wait_cnt++ < 10);
1161
1162	if (wait_cnt >= 10)
1163		return -ETIMEDOUT;
1164
1165	return 0;
1166}
1167
1168static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter)
1169{
1170	int mii_ctl;
1171	int ret;
1172
1173	/* SGMII/1000/2500BASE-X PCS power down */
1174	mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
1175	if (mii_ctl < 0)
1176		return mii_ctl;
1177
1178	mii_ctl |= BMCR_PDOWN;
1179	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
1180	if (ret < 0)
1181		return ret;
1182
1183	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
1184	if (ret < 0)
1185		return ret;
1186
1187	/* SGMII/1000/2500BASE-X PCS power up */
1188	mii_ctl &= ~BMCR_PDOWN;
1189	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
1190	if (ret < 0)
1191		return ret;
1192
1193	return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
1194}
1195
1196static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
1197				    u8 *addr)
1198{
1199	u32 addr_lo, addr_hi;
1200
1201	addr_lo = addr[0] |
1202		addr[1] << 8 |
1203		addr[2] << 16 |
1204		addr[3] << 24;
1205	addr_hi = addr[4] |
1206		addr[5] << 8;
1207	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
1208	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
1209
1210	ether_addr_copy(adapter->mac_address, addr);
1211	netif_info(adapter, drv, adapter->netdev,
1212		   "MAC address set to %pM\n", addr);
1213}
1214
1215static int lan743x_mac_init(struct lan743x_adapter *adapter)
1216{
1217	bool mac_address_valid = true;
1218	struct net_device *netdev;
1219	u32 mac_addr_hi = 0;
1220	u32 mac_addr_lo = 0;
1221	u32 data;
1222
1223	netdev = adapter->netdev;
1224
1225	/* disable auto duplex, and speed detection. Phylib does that */
1226	data = lan743x_csr_read(adapter, MAC_CR);
1227	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
1228	data |= MAC_CR_CNTR_RST_;
1229	lan743x_csr_write(adapter, MAC_CR, data);
1230
1231	if (!is_valid_ether_addr(adapter->mac_address)) {
1232		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
1233		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
1234		adapter->mac_address[0] = mac_addr_lo & 0xFF;
1235		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
1236		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
1237		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
1238		adapter->mac_address[4] = mac_addr_hi & 0xFF;
1239		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
1240
1241		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
1242		    mac_addr_lo == 0xFFFFFFFF) {
1243			mac_address_valid = false;
1244		} else if (!is_valid_ether_addr(adapter->mac_address)) {
1245			mac_address_valid = false;
1246		}
1247
1248		if (!mac_address_valid)
1249			eth_random_addr(adapter->mac_address);
1250	}
1251	lan743x_mac_set_address(adapter, adapter->mac_address);
1252	eth_hw_addr_set(netdev, adapter->mac_address);
1253
1254	return 0;
1255}
1256
1257static int lan743x_mac_open(struct lan743x_adapter *adapter)
1258{
1259	u32 temp;
1260
1261	temp = lan743x_csr_read(adapter, MAC_RX);
1262	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
1263	temp = lan743x_csr_read(adapter, MAC_TX);
1264	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
1265	return 0;
1266}
1267
1268static void lan743x_mac_close(struct lan743x_adapter *adapter)
1269{
1270	u32 temp;
1271
1272	temp = lan743x_csr_read(adapter, MAC_TX);
1273	temp &= ~MAC_TX_TXEN_;
1274	lan743x_csr_write(adapter, MAC_TX, temp);
1275	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
1276				 1, 1000, 20000, 100);
1277
1278	temp = lan743x_csr_read(adapter, MAC_RX);
1279	temp &= ~MAC_RX_RXEN_;
1280	lan743x_csr_write(adapter, MAC_RX, temp);
1281	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
1282				 1, 1000, 20000, 100);
1283}
1284
1285void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
1286				       bool tx_enable, bool rx_enable)
1287{
1288	u32 flow_setting = 0;
1289
1290	/* set maximum pause time because when fifo space frees
1291	 * up a zero value pause frame will be sent to release the pause
1292	 */
1293	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
1294	if (tx_enable)
1295		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
1296	if (rx_enable)
1297		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
1298	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
1299}
1300
1301static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
1302{
1303	int enabled = 0;
1304	u32 mac_rx = 0;
1305
1306	mac_rx = lan743x_csr_read(adapter, MAC_RX);
1307	if (mac_rx & MAC_RX_RXEN_) {
1308		enabled = 1;
1309		if (mac_rx & MAC_RX_RXD_) {
1310			lan743x_csr_write(adapter, MAC_RX, mac_rx);
1311			mac_rx &= ~MAC_RX_RXD_;
1312		}
1313		mac_rx &= ~MAC_RX_RXEN_;
1314		lan743x_csr_write(adapter, MAC_RX, mac_rx);
1315		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
1316					 1, 1000, 20000, 100);
1317		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
1318	}
1319
1320	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
1321	mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
1322		  << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1323	lan743x_csr_write(adapter, MAC_RX, mac_rx);
1324
1325	if (enabled) {
1326		mac_rx |= MAC_RX_RXEN_;
1327		lan743x_csr_write(adapter, MAC_RX, mac_rx);
1328	}
1329	return 0;
1330}
1331
1332/* PHY */
1333static int lan743x_phy_reset(struct lan743x_adapter *adapter)
1334{
1335	u32 data;
1336
1337	/* Only called within probe, and before mdiobus_register */
1338
1339	data = lan743x_csr_read(adapter, PMT_CTL);
1340	data |= PMT_CTL_ETH_PHY_RST_;
1341	lan743x_csr_write(adapter, PMT_CTL, data);
1342
1343	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
1344				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
1345				  (data & PMT_CTL_READY_)),
1346				  50000, 1000000);
1347}
1348
1349static int lan743x_phy_init(struct lan743x_adapter *adapter)
1350{
1351	return lan743x_phy_reset(adapter);
1352}
1353
1354static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
1355{
1356	u32 id_rev;
1357	u32 data;
1358
1359	data = lan743x_csr_read(adapter, MAC_CR);
1360	id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
1361
1362	if (adapter->is_pci11x1x && adapter->is_sgmii_en)
1363		adapter->phy_interface = PHY_INTERFACE_MODE_SGMII;
1364	else if (id_rev == ID_REV_ID_LAN7430_)
1365		adapter->phy_interface = PHY_INTERFACE_MODE_GMII;
1366	else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_))
1367		adapter->phy_interface = PHY_INTERFACE_MODE_MII;
1368	else
1369		adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
1370
1371	netif_dbg(adapter, drv, adapter->netdev,
1372		  "selected phy interface: 0x%X\n", adapter->phy_interface);
1373}
1374
1375static void lan743x_rfe_open(struct lan743x_adapter *adapter)
1376{
1377	lan743x_csr_write(adapter, RFE_RSS_CFG,
1378		RFE_RSS_CFG_UDP_IPV6_EX_ |
1379		RFE_RSS_CFG_TCP_IPV6_EX_ |
1380		RFE_RSS_CFG_IPV6_EX_ |
1381		RFE_RSS_CFG_UDP_IPV6_ |
1382		RFE_RSS_CFG_TCP_IPV6_ |
1383		RFE_RSS_CFG_IPV6_ |
1384		RFE_RSS_CFG_UDP_IPV4_ |
1385		RFE_RSS_CFG_TCP_IPV4_ |
1386		RFE_RSS_CFG_IPV4_ |
1387		RFE_RSS_CFG_VALID_HASH_BITS_ |
1388		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
1389		RFE_RSS_CFG_RSS_HASH_STORE_ |
1390		RFE_RSS_CFG_RSS_ENABLE_);
1391}
1392
1393static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1394{
1395	u8 *mac_addr;
1396	u32 mac_addr_hi = 0;
1397	u32 mac_addr_lo = 0;
1398
1399	/* Add mac address to perfect Filter */
1400	mac_addr = adapter->mac_address;
1401	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1402		      (((u32)(mac_addr[1])) << 8) |
1403		      (((u32)(mac_addr[2])) << 16) |
1404		      (((u32)(mac_addr[3])) << 24));
1405	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1406		      (((u32)(mac_addr[5])) << 8));
1407
1408	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1409	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1410			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1411}
1412
1413static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
1414{
1415	struct net_device *netdev = adapter->netdev;
1416	u32 hash_table[DP_SEL_VHF_HASH_LEN];
1417	u32 rfctl;
1418	u32 data;
1419
1420	rfctl = lan743x_csr_read(adapter, RFE_CTL);
1421	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
1422		 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1423	rfctl |= RFE_CTL_AB_;
1424	if (netdev->flags & IFF_PROMISC) {
1425		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
1426	} else {
1427		if (netdev->flags & IFF_ALLMULTI)
1428			rfctl |= RFE_CTL_AM_;
1429	}
1430
1431	if (netdev->features & NETIF_F_RXCSUM)
1432		rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;
1433
1434	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
1435	if (netdev_mc_count(netdev)) {
1436		struct netdev_hw_addr *ha;
1437		int i;
1438
1439		rfctl |= RFE_CTL_DA_PERFECT_;
1440		i = 1;
1441		netdev_for_each_mc_addr(ha, netdev) {
1442			/* set first 32 into Perfect Filter */
1443			if (i < 33) {
1444				lan743x_csr_write(adapter,
1445						  RFE_ADDR_FILT_HI(i), 0);
1446				data = ha->addr[3];
1447				data = ha->addr[2] | (data << 8);
1448				data = ha->addr[1] | (data << 8);
1449				data = ha->addr[0] | (data << 8);
1450				lan743x_csr_write(adapter,
1451						  RFE_ADDR_FILT_LO(i), data);
1452				data = ha->addr[5];
1453				data = ha->addr[4] | (data << 8);
1454				data |= RFE_ADDR_FILT_HI_VALID_;
1455				lan743x_csr_write(adapter,
1456						  RFE_ADDR_FILT_HI(i), data);
1457			} else {
1458				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
1459					     23) & 0x1FF;
1460				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
1461				rfctl |= RFE_CTL_MCAST_HASH_;
1462			}
1463			i++;
1464		}
1465	}
1466
1467	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
1468			 DP_SEL_VHF_VLAN_LEN,
1469			 DP_SEL_VHF_HASH_LEN, hash_table);
1470	lan743x_csr_write(adapter, RFE_CTL, rfctl);
1471}
1472
1473static int lan743x_dmac_init(struct lan743x_adapter *adapter)
1474{
1475	u32 data = 0;
1476
1477	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
1478	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
1479				 0, 1000, 20000, 100);
1480	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
1481	case DMA_DESCRIPTOR_SPACING_16:
1482		data = DMAC_CFG_MAX_DSPACE_16_;
1483		break;
1484	case DMA_DESCRIPTOR_SPACING_32:
1485		data = DMAC_CFG_MAX_DSPACE_32_;
1486		break;
1487	case DMA_DESCRIPTOR_SPACING_64:
1488		data = DMAC_CFG_MAX_DSPACE_64_;
1489		break;
1490	case DMA_DESCRIPTOR_SPACING_128:
1491		data = DMAC_CFG_MAX_DSPACE_128_;
1492		break;
1493	default:
1494		return -EPERM;
1495	}
1496	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1497		data |= DMAC_CFG_COAL_EN_;
1498	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
1499	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
1500	lan743x_csr_write(adapter, DMAC_CFG, data);
1501	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
1502	data |= DMAC_COAL_CFG_TIMER_TX_START_;
1503	data |= DMAC_COAL_CFG_FLUSH_INTS_;
1504	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
1505	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
1506	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
1507	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
1508	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
1509	data = DMAC_OBFF_TX_THRES_SET_(0x08);
1510	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
1511	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
1512	return 0;
1513}
1514
1515static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1516				     int tx_channel)
1517{
1518	u32 dmac_cmd = 0;
1519
1520	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1521	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1522				      DMAC_CMD_START_T_(tx_channel)),
1523				      (dmac_cmd &
1524				      DMAC_CMD_STOP_T_(tx_channel)));
1525}
1526
1527static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1528					     int tx_channel)
1529{
1530	int timeout = 100;
1531	int result = 0;
1532
1533	while (timeout &&
1534	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1535	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1536		usleep_range(1000, 20000);
1537		timeout--;
1538	}
1539	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1540		result = -ENODEV;
1541	return result;
1542}
1543
1544static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1545				     int rx_channel)
1546{
1547	u32 dmac_cmd = 0;
1548
1549	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1550	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1551				      DMAC_CMD_START_R_(rx_channel)),
1552				      (dmac_cmd &
1553				      DMAC_CMD_STOP_R_(rx_channel)));
1554}
1555
1556static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1557					     int rx_channel)
1558{
1559	int timeout = 100;
1560	int result = 0;
1561
1562	while (timeout &&
1563	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1564	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1565		usleep_range(1000, 20000);
1566		timeout--;
1567	}
1568	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1569		result = -ENODEV;
1570	return result;
1571}
1572
1573static void lan743x_tx_release_desc(struct lan743x_tx *tx,
1574				    int descriptor_index, bool cleanup)
1575{
1576	struct lan743x_tx_buffer_info *buffer_info = NULL;
1577	struct lan743x_tx_descriptor *descriptor = NULL;
1578	u32 descriptor_type = 0;
1579	bool ignore_sync;
1580
1581	descriptor = &tx->ring_cpu_ptr[descriptor_index];
1582	buffer_info = &tx->buffer_info[descriptor_index];
1583	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
1584		goto done;
1585
1586	descriptor_type = le32_to_cpu(descriptor->data0) &
1587			  TX_DESC_DATA0_DTYPE_MASK_;
1588	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
1589		goto clean_up_data_descriptor;
1590	else
1591		goto clear_active;
1592
1593clean_up_data_descriptor:
1594	if (buffer_info->dma_ptr) {
1595		if (buffer_info->flags &
1596		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
1597			dma_unmap_page(&tx->adapter->pdev->dev,
1598				       buffer_info->dma_ptr,
1599				       buffer_info->buffer_length,
1600				       DMA_TO_DEVICE);
1601		} else {
1602			dma_unmap_single(&tx->adapter->pdev->dev,
1603					 buffer_info->dma_ptr,
1604					 buffer_info->buffer_length,
1605					 DMA_TO_DEVICE);
1606		}
1607		buffer_info->dma_ptr = 0;
1608		buffer_info->buffer_length = 0;
1609	}
1610	if (!buffer_info->skb)
1611		goto clear_active;
1612
1613	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
1614		dev_kfree_skb_any(buffer_info->skb);
1615		goto clear_skb;
1616	}
1617
1618	if (cleanup) {
1619		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
1620		dev_kfree_skb_any(buffer_info->skb);
1621	} else {
1622		ignore_sync = (buffer_info->flags &
1623			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
1624		lan743x_ptp_tx_timestamp_skb(tx->adapter,
1625					     buffer_info->skb, ignore_sync);
1626	}
1627
1628clear_skb:
1629	buffer_info->skb = NULL;
1630
1631clear_active:
1632	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
1633
1634done:
1635	memset(buffer_info, 0, sizeof(*buffer_info));
1636	memset(descriptor, 0, sizeof(*descriptor));
1637}
1638
1639static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1640{
1641	return ((++index) % tx->ring_size);
1642}
1643
1644static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1645{
1646	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
1647		lan743x_tx_release_desc(tx, tx->last_head, false);
1648		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1649	}
1650}
1651
1652static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1653{
1654	u32 original_head = 0;
1655
1656	original_head = tx->last_head;
1657	do {
1658		lan743x_tx_release_desc(tx, tx->last_head, true);
1659		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1660	} while (tx->last_head != original_head);
1661	memset(tx->ring_cpu_ptr, 0,
1662	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1663	memset(tx->buffer_info, 0,
1664	       sizeof(*tx->buffer_info) * (tx->ring_size));
1665}
1666
1667static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1668				   struct sk_buff *skb)
1669{
1670	int result = 1; /* 1 for the main skb buffer */
1671	int nr_frags = 0;
1672
1673	if (skb_is_gso(skb))
1674		result++; /* requires an extension descriptor */
1675	nr_frags = skb_shinfo(skb)->nr_frags;
1676	result += nr_frags; /* 1 for each fragment buffer */
1677	return result;
1678}
1679
1680static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1681{
1682	int last_head = tx->last_head;
1683	int last_tail = tx->last_tail;
1684
1685	if (last_tail >= last_head)
1686		return tx->ring_size - last_tail + last_head - 1;
1687	else
1688		return last_head - last_tail - 1;
1689}
1690
1691static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter,
1692					   int rx_ts_config)
1693{
1694	int channel_number;
1695	int index;
1696	u32 data;
1697
1698	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
1699		channel_number = adapter->rx[index].channel_number;
1700		data = lan743x_csr_read(adapter, RX_CFG_B(channel_number));
1701		data &= RX_CFG_B_TS_MASK_;
1702		data |= rx_ts_config;
1703		lan743x_csr_write(adapter, RX_CFG_B(channel_number),
1704				  data);
1705	}
1706}
1707
1708int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
1709			       int rx_filter)
1710{
1711	u32 data;
1712
1713	switch (rx_filter) {
1714	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1715			lan743x_rx_cfg_b_tstamp_config(adapter,
1716						       RX_CFG_B_TS_DESCR_EN_);
1717			data = lan743x_csr_read(adapter, PTP_RX_TS_CFG);
1718			data |= PTP_RX_TS_CFG_EVENT_MSGS_;
1719			lan743x_csr_write(adapter, PTP_RX_TS_CFG, data);
1720			break;
1721	case HWTSTAMP_FILTER_NONE:
1722			lan743x_rx_cfg_b_tstamp_config(adapter,
1723						       RX_CFG_B_TS_NONE_);
1724			break;
1725	case HWTSTAMP_FILTER_ALL:
1726			lan743x_rx_cfg_b_tstamp_config(adapter,
1727						       RX_CFG_B_TS_ALL_RX_);
1728			break;
1729	default:
1730			return -ERANGE;
1731	}
1732	return 0;
1733}
1734
1735void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
1736				      bool enable_timestamping,
1737				      bool enable_onestep_sync)
1738{
1739	if (enable_timestamping)
1740		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
1741	else
1742		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
1743	if (enable_onestep_sync)
1744		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
1745	else
1746		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
1747}
1748
1749static int lan743x_tx_frame_start(struct lan743x_tx *tx,
1750				  unsigned char *first_buffer,
1751				  unsigned int first_buffer_length,
1752				  unsigned int frame_length,
1753				  bool time_stamp,
1754				  bool check_sum)
1755{
1756	/* called only from within lan743x_tx_xmit_frame.
1757	 * assuming tx->ring_lock has already been acquired.
1758	 */
1759	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1760	struct lan743x_tx_buffer_info *buffer_info = NULL;
1761	struct lan743x_adapter *adapter = tx->adapter;
1762	struct device *dev = &adapter->pdev->dev;
1763	dma_addr_t dma_ptr;
1764
1765	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
1766	tx->frame_first = tx->last_tail;
1767	tx->frame_tail = tx->frame_first;
1768
1769	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1770	buffer_info = &tx->buffer_info[tx->frame_tail];
1771	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
1772				 DMA_TO_DEVICE);
1773	if (dma_mapping_error(dev, dma_ptr))
1774		return -ENOMEM;
1775
1776	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1777	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1778	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1779		TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1780
1781	buffer_info->skb = NULL;
1782	buffer_info->dma_ptr = dma_ptr;
1783	buffer_info->buffer_length = first_buffer_length;
1784	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1785
1786	tx->frame_data0 = (first_buffer_length &
1787		TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1788		TX_DESC_DATA0_DTYPE_DATA_ |
1789		TX_DESC_DATA0_FS_ |
1790		TX_DESC_DATA0_FCS_;
1791	if (time_stamp)
1792		tx->frame_data0 |= TX_DESC_DATA0_TSE_;
1793
1794	if (check_sum)
1795		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
1796				   TX_DESC_DATA0_IPE_ |
1797				   TX_DESC_DATA0_TPE_;
1798
1799	/* data0 will be programmed by one of the other frame assembler functions */
1800	return 0;
1801}
1802
1803static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
1804				     unsigned int frame_length,
1805				     int nr_frags)
1806{
1807	/* Called only from within lan743x_tx_xmit_frame;
1808	 * the caller must hold tx->ring_lock.
1809	 */
1810	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1811	struct lan743x_tx_buffer_info *buffer_info = NULL;
1812
1813	/* wrap up previous descriptor */
1814	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
1815	if (nr_frags <= 0) {
1816		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1817		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1818	}
1819	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1820	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1821
1822	/* move to next descriptor */
1823	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1824	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1825	buffer_info = &tx->buffer_info[tx->frame_tail];
1826
1827	/* add extension descriptor */
1828	tx_descriptor->data1 = 0;
1829	tx_descriptor->data2 = 0;
1830	tx_descriptor->data3 = 0;
1831
1832	buffer_info->skb = NULL;
1833	buffer_info->dma_ptr = 0;
1834	buffer_info->buffer_length = 0;
1835	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1836
1837	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
1838			  TX_DESC_DATA0_DTYPE_EXT_ |
1839			  TX_DESC_DATA0_EXT_LSO_;
1840
1841	/* data0 will be programmed by one of the other frame assembler functions */
1842}
1843
1844static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
1845					 const skb_frag_t *fragment,
1846					 unsigned int frame_length)
1847{
1848	/* Called only from within lan743x_tx_xmit_frame;
1849	 * the caller must hold tx->ring_lock.
1850	 */
1851	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1852	struct lan743x_tx_buffer_info *buffer_info = NULL;
1853	struct lan743x_adapter *adapter = tx->adapter;
1854	struct device *dev = &adapter->pdev->dev;
1855	unsigned int fragment_length = 0;
1856	dma_addr_t dma_ptr;
1857
1858	fragment_length = skb_frag_size(fragment);
1859	if (!fragment_length)
1860		return 0;
1861
1862	/* wrap up previous descriptor */
1863	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1864	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1865
1866	/* move to next descriptor */
1867	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1868	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1869	buffer_info = &tx->buffer_info[tx->frame_tail];
1870	dma_ptr = skb_frag_dma_map(dev, fragment,
1871				   0, fragment_length,
1872				   DMA_TO_DEVICE);
1873	if (dma_mapping_error(dev, dma_ptr)) {
1874		int desc_index;
1875
1876		/* cleanup all previously setup descriptors */
1877		desc_index = tx->frame_first;
1878		while (desc_index != tx->frame_tail) {
1879			lan743x_tx_release_desc(tx, desc_index, true);
1880			desc_index = lan743x_tx_next_index(tx, desc_index);
1881		}
1882		dma_wmb();
1883		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1884		tx->frame_first = 0;
1885		tx->frame_data0 = 0;
1886		tx->frame_tail = 0;
1887		return -ENOMEM;
1888	}
1889
1890	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1891	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1892	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1893			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1894
1895	buffer_info->skb = NULL;
1896	buffer_info->dma_ptr = dma_ptr;
1897	buffer_info->buffer_length = fragment_length;
1898	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1899	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
1900
1901	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1902			  TX_DESC_DATA0_DTYPE_DATA_ |
1903			  TX_DESC_DATA0_FCS_;
1904
1905	/* data0 will be programmed by one of the other frame assembler functions */
1906	return 0;
1907}
1908
1909static void lan743x_tx_frame_end(struct lan743x_tx *tx,
1910				 struct sk_buff *skb,
1911				 bool time_stamp,
1912				 bool ignore_sync)
1913{
1914	/* Called only from within lan743x_tx_xmit_frame;
1915	 * the caller must hold tx->ring_lock.
1916	 */
1917	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1918	struct lan743x_tx_buffer_info *buffer_info = NULL;
1919	struct lan743x_adapter *adapter = tx->adapter;
1920	u32 tx_tail_flags = 0;
1921
1922	/* wrap up previous descriptor */
1923	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
1924	    TX_DESC_DATA0_DTYPE_DATA_) {
1925		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1926		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1927	}
1928
1929	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1930	buffer_info = &tx->buffer_info[tx->frame_tail];
1931	buffer_info->skb = skb;
1932	if (time_stamp)
1933		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
1934	if (ignore_sync)
1935		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
1936
1937	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1938	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1939	tx->last_tail = tx->frame_tail;
1940
1941	dma_wmb();
1942
1943	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
1944		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
1945	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
1946		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
1947		TX_TAIL_SET_TOP_INT_EN_;
1948
1949	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1950			  tx_tail_flags | tx->frame_tail);
1951	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1952}
1953
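/* Queue an skb on the TX ring. If the ring does not have enough free
 * descriptors, the number required is saved in tx->rqd_descriptors, the
 * netdev queue is stopped and NETDEV_TX_BUSY is returned; the NAPI poll
 * handler restarts the queue once enough descriptors have been released.
 * Frames that could never fit (more descriptors than the ring holds) are
 * dropped.
 */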
1954static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
1955					 struct sk_buff *skb)
1956{
1957	int required_number_of_descriptors = 0;
1958	unsigned int start_frame_length = 0;
1959	netdev_tx_t retval = NETDEV_TX_OK;
1960	unsigned int frame_length = 0;
1961	unsigned int head_length = 0;
1962	unsigned long irq_flags = 0;
1963	bool do_timestamp = false;
1964	bool ignore_sync = false;
1965	struct netdev_queue *txq;
1966	int nr_frags = 0;
1967	bool gso = false;
1968	int j;
1969
1970	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
1971
1972	spin_lock_irqsave(&tx->ring_lock, irq_flags);
1973	if (required_number_of_descriptors >
1974		lan743x_tx_get_avail_desc(tx)) {
1975		if (required_number_of_descriptors > (tx->ring_size - 1)) {
1976			dev_kfree_skb_irq(skb);
1977		} else {
1978			/* save the descriptor count needed to restart the queue later */
1979			tx->rqd_descriptors = required_number_of_descriptors;
1980			retval = NETDEV_TX_BUSY;
1981			txq = netdev_get_tx_queue(tx->adapter->netdev,
1982						  tx->channel_number);
1983			netif_tx_stop_queue(txq);
1984		}
1985		goto unlock;
1986	}
1987
1988	/* space available, transmit skb  */
1989	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1990	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
1991	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
1992		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1993		do_timestamp = true;
1994		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
1995			ignore_sync = true;
1996	}
1997	head_length = skb_headlen(skb);
1998	frame_length = skb_pagelen(skb);
1999	nr_frags = skb_shinfo(skb)->nr_frags;
2000	start_frame_length = frame_length;
2001	gso = skb_is_gso(skb);
2002	if (gso) {
2003		start_frame_length = max(skb_shinfo(skb)->gso_size,
2004					 (unsigned short)8);
2005	}
2006
2007	if (lan743x_tx_frame_start(tx,
2008				   skb->data, head_length,
2009				   start_frame_length,
2010				   do_timestamp,
2011				   skb->ip_summed == CHECKSUM_PARTIAL)) {
2012		dev_kfree_skb_irq(skb);
2013		goto unlock;
2014	}
2015	tx->frame_count++;
2016
2017	if (gso)
2018		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
2019
2020	if (nr_frags <= 0)
2021		goto finish;
2022
2023	for (j = 0; j < nr_frags; j++) {
2024		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
2025
2026		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
2027			/* upon error no need to call
2028			 *	lan743x_tx_frame_end
2029			 * frame assembler clean up was performed inside
2030			 *	lan743x_tx_frame_add_fragment
2031			 */
2032			dev_kfree_skb_irq(skb);
2033			goto unlock;
2034		}
2035	}
2036
2037finish:
2038	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
2039
2040unlock:
2041	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
2042	return retval;
2043}
2044
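/* TX completion poll: acknowledge the per-channel IOC interrupt, release
 * the descriptors the hardware has finished with, and wake the netdev
 * queue when it was stopped and enough descriptors are free again (or
 * unconditionally if no required descriptor count was recorded).
 */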
2045static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
2046{
2047	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
2048	struct lan743x_adapter *adapter = tx->adapter;
2049	unsigned long irq_flags = 0;
2050	struct netdev_queue *txq;
2051	u32 ioc_bit = 0;
2052
2053	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
2054	lan743x_csr_read(adapter, DMAC_INT_STS);
2055	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
2056		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
2057	spin_lock_irqsave(&tx->ring_lock, irq_flags);
2058
2059	/* clean up tx ring */
2060	lan743x_tx_release_completed_descriptors(tx);
2061	txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
2062	if (netif_tx_queue_stopped(txq)) {
2063		if (tx->rqd_descriptors) {
2064			if (tx->rqd_descriptors <=
2065			    lan743x_tx_get_avail_desc(tx)) {
2066				tx->rqd_descriptors = 0;
2067				netif_tx_wake_queue(txq);
2068			}
2069		} else {
2070			netif_tx_wake_queue(txq);
2071		}
2072	}
2073	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
2074
2075	if (!napi_complete(napi))
2076		goto done;
2077
2078	/* enable isr */
2079	lan743x_csr_write(adapter, INT_EN_SET,
2080			  INT_BIT_DMA_TX_(tx->channel_number));
2081	lan743x_csr_read(adapter, INT_STS);
2082
2083done:
2084	return 0;
2085}
2086
2087static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
2088{
2089	if (tx->head_cpu_ptr) {
2090		dma_free_coherent(&tx->adapter->pdev->dev,
2091				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
2092				  tx->head_dma_ptr);
2093		tx->head_cpu_ptr = NULL;
2094		tx->head_dma_ptr = 0;
2095	}
2096	kfree(tx->buffer_info);
2097	tx->buffer_info = NULL;
2098
2099	if (tx->ring_cpu_ptr) {
2100		dma_free_coherent(&tx->adapter->pdev->dev,
2101				  tx->ring_allocation_size, tx->ring_cpu_ptr,
2102				  tx->ring_dma_ptr);
2103		tx->ring_allocation_size = 0;
2104		tx->ring_cpu_ptr = NULL;
2105		tx->ring_dma_ptr = 0;
2106	}
2107	tx->ring_size = 0;
2108}
2109
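/* Allocate the TX ring: a DMA-coherent descriptor array rounded up to a
 * whole page, a buffer_info array tracking the skb and DMA mapping behind
 * each descriptor, and a single DMA-coherent word used by the hardware for
 * head-pointer writeback (which must be at least 4-byte aligned).
 */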
2110static int lan743x_tx_ring_init(struct lan743x_tx *tx)
2111{
2112	size_t ring_allocation_size = 0;
2113	void *cpu_ptr = NULL;
2114	dma_addr_t dma_ptr;
2115	int ret = -ENOMEM;
2116
2117	tx->ring_size = LAN743X_TX_RING_SIZE;
2118	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
2119		ret = -EINVAL;
2120		goto cleanup;
2121	}
2122	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
2123				      DMA_BIT_MASK(64))) {
2124		dev_warn(&tx->adapter->pdev->dev,
2125			 "lan743x: No suitable DMA available\n");
2126		ret = -ENOMEM;
2127		goto cleanup;
2128	}
2129	ring_allocation_size = ALIGN(tx->ring_size *
2130				     sizeof(struct lan743x_tx_descriptor),
2131				     PAGE_SIZE);
2132	dma_ptr = 0;
2133	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2134				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2135	if (!cpu_ptr) {
2136		ret = -ENOMEM;
2137		goto cleanup;
2138	}
2139
2140	tx->ring_allocation_size = ring_allocation_size;
2141	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
2142	tx->ring_dma_ptr = dma_ptr;
2143
2144	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
2145	if (!cpu_ptr) {
2146		ret = -ENOMEM;
2147		goto cleanup;
2148	}
2149	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
2150	dma_ptr = 0;
2151	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2152				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
2153				     GFP_KERNEL);
2154	if (!cpu_ptr) {
2155		ret = -ENOMEM;
2156		goto cleanup;
2157	}
2158
2159	tx->head_cpu_ptr = cpu_ptr;
2160	tx->head_dma_ptr = dma_ptr;
2161	if (tx->head_dma_ptr & 0x3) {
2162		ret = -ENOMEM;
2163		goto cleanup;
2164	}
2165
2166	return 0;
2167
2168cleanup:
2169	lan743x_tx_ring_cleanup(tx);
2170	return ret;
2171}
2172
2173static void lan743x_tx_close(struct lan743x_tx *tx)
2174{
2175	struct lan743x_adapter *adapter = tx->adapter;
2176
2177	lan743x_csr_write(adapter,
2178			  DMAC_CMD,
2179			  DMAC_CMD_STOP_T_(tx->channel_number));
2180	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
2181
2182	lan743x_csr_write(adapter,
2183			  DMAC_INT_EN_CLR,
2184			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
2185	lan743x_csr_write(adapter, INT_EN_CLR,
2186			  INT_BIT_DMA_TX_(tx->channel_number));
2187	napi_disable(&tx->napi);
2188	netif_napi_del(&tx->napi);
2189
2190	lan743x_csr_write(adapter, FCT_TX_CTL,
2191			  FCT_TX_CTL_DIS_(tx->channel_number));
2192	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
2193				 FCT_TX_CTL_EN_(tx->channel_number),
2194				 0, 1000, 20000, 100);
2195
2196	lan743x_tx_release_all_descriptors(tx);
2197
2198	tx->rqd_descriptors = 0;
2199
2200	lan743x_tx_ring_cleanup(tx);
2201}
2202
2203static int lan743x_tx_open(struct lan743x_tx *tx)
2204{
2205	struct lan743x_adapter *adapter = NULL;
2206	u32 data = 0;
2207	int ret;
2208
2209	adapter = tx->adapter;
2210	ret = lan743x_tx_ring_init(tx);
2211	if (ret)
2212		return ret;
2213
2214	/* initialize fifo */
2215	lan743x_csr_write(adapter, FCT_TX_CTL,
2216			  FCT_TX_CTL_RESET_(tx->channel_number));
2217	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
2218				 FCT_TX_CTL_RESET_(tx->channel_number),
2219				 0, 1000, 20000, 100);
2220
2221	/* enable fifo */
2222	lan743x_csr_write(adapter, FCT_TX_CTL,
2223			  FCT_TX_CTL_EN_(tx->channel_number));
2224
2225	/* reset tx channel */
2226	lan743x_csr_write(adapter, DMAC_CMD,
2227			  DMAC_CMD_TX_SWR_(tx->channel_number));
2228	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2229				 DMAC_CMD_TX_SWR_(tx->channel_number),
2230				 0, 1000, 20000, 100);
2231
2232	/* Write TX_BASE_ADDR */
2233	lan743x_csr_write(adapter,
2234			  TX_BASE_ADDRH(tx->channel_number),
2235			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
2236	lan743x_csr_write(adapter,
2237			  TX_BASE_ADDRL(tx->channel_number),
2238			  DMA_ADDR_LOW32(tx->ring_dma_ptr));
2239
2240	/* Write TX_CFG_B */
2241	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
2242	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
2243	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
2244	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2245		data |= TX_CFG_B_TDMABL_512_;
2246	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
2247
2248	/* Write TX_CFG_A */
2249	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
2250	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2251		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
2252		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
2253		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
2254		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
2255	}
2256	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
2257
2258	/* Write TX_HEAD_WRITEBACK_ADDR */
2259	lan743x_csr_write(adapter,
2260			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
2261			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
2262	lan743x_csr_write(adapter,
2263			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
2264			  DMA_ADDR_LOW32(tx->head_dma_ptr));
2265
2266	/* set last head */
2267	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
2268
2269	/* write TX_TAIL */
2270	tx->last_tail = 0;
2271	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
2272			  (u32)(tx->last_tail));
2273	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2274							 INT_BIT_DMA_TX_
2275							 (tx->channel_number));
2276	netif_napi_add_tx_weight(adapter->netdev,
2277				 &tx->napi, lan743x_tx_napi_poll,
2278				 NAPI_POLL_WEIGHT);
2279	napi_enable(&tx->napi);
2280
2281	data = 0;
2282	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2283		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
2284	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2285		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
2286	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2287		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
2288	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2289		data |= TX_CFG_C_TX_INT_EN_R2C_;
2290	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
2291
2292	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
2293		lan743x_csr_write(adapter, INT_EN_SET,
2294				  INT_BIT_DMA_TX_(tx->channel_number));
2295	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2296			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
2297
2298	/*  start dmac channel */
2299	lan743x_csr_write(adapter, DMAC_CMD,
2300			  DMAC_CMD_START_T_(tx->channel_number));
2301	return 0;
2302}
2303
2304static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
2305{
2306	return ((++index) % rx->ring_size);
2307}
2308
2309static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
2310{
2311	/* update the tail once per 8 descriptors */
2312	if ((index & 7) == 7)
2313		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
2314				  index);
2315}
2316
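/* Attach a freshly allocated receive buffer to ring slot @index. The
 * buffer is sized for the current MTU plus the Ethernet header, FCS and
 * RX_HEAD_PADDING. If the slot already held a DMA mapping, only the area
 * the hardware actually used is synced before the old mapping is torn
 * down, then the descriptor is handed back to the hardware with the OWN
 * bit set.
 */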
2317static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
2318					gfp_t gfp)
2319{
2320	struct net_device *netdev = rx->adapter->netdev;
2321	struct device *dev = &rx->adapter->pdev->dev;
2322	struct lan743x_rx_buffer_info *buffer_info;
2323	unsigned int buffer_length, used_length;
2324	struct lan743x_rx_descriptor *descriptor;
2325	struct sk_buff *skb;
2326	dma_addr_t dma_ptr;
2327
2328	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
2329
2330	descriptor = &rx->ring_cpu_ptr[index];
2331	buffer_info = &rx->buffer_info[index];
2332	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
2333	if (!skb)
2334		return -ENOMEM;
2335	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
2336	if (dma_mapping_error(dev, dma_ptr)) {
2337		dev_kfree_skb_any(skb);
2338		return -ENOMEM;
2339	}
2340	if (buffer_info->dma_ptr) {
2341		/* sync used area of buffer only */
2342		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
2343			/* frame length is valid only if LS bit is set.
2344			 * it's a safe upper bound for the used area in this
2345			 * buffer.
2346			 */
2347			used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
2348					  (le32_to_cpu(descriptor->data0)),
2349					  buffer_info->buffer_length);
2350		else
2351			used_length = buffer_info->buffer_length;
2352		dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
2353					used_length,
2354					DMA_FROM_DEVICE);
2355		dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
2356				       buffer_info->buffer_length,
2357				       DMA_FROM_DEVICE,
2358				       DMA_ATTR_SKIP_CPU_SYNC);
2359	}
2360
2361	buffer_info->skb = skb;
2362	buffer_info->dma_ptr = dma_ptr;
2363	buffer_info->buffer_length = buffer_length;
2364	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2365	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2366	descriptor->data3 = 0;
2367	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2368			    (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2369	lan743x_rx_update_tail(rx, index);
2370
2371	return 0;
2372}
2373
2374static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
2375{
2376	struct lan743x_rx_buffer_info *buffer_info;
2377	struct lan743x_rx_descriptor *descriptor;
2378
2379	descriptor = &rx->ring_cpu_ptr[index];
2380	buffer_info = &rx->buffer_info[index];
2381
2382	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2383	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2384	descriptor->data3 = 0;
2385	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2386			    ((buffer_info->buffer_length) &
2387			    RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2388	lan743x_rx_update_tail(rx, index);
2389}
2390
2391static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
2392{
2393	struct lan743x_rx_buffer_info *buffer_info;
2394	struct lan743x_rx_descriptor *descriptor;
2395
2396	descriptor = &rx->ring_cpu_ptr[index];
2397	buffer_info = &rx->buffer_info[index];
2398
2399	memset(descriptor, 0, sizeof(*descriptor));
2400
2401	if (buffer_info->dma_ptr) {
2402		dma_unmap_single(&rx->adapter->pdev->dev,
2403				 buffer_info->dma_ptr,
2404				 buffer_info->buffer_length,
2405				 DMA_FROM_DEVICE);
2406		buffer_info->dma_ptr = 0;
2407	}
2408
2409	if (buffer_info->skb) {
2410		dev_kfree_skb(buffer_info->skb);
2411		buffer_info->skb = NULL;
2412	}
2413
2414	memset(buffer_info, 0, sizeof(*buffer_info));
2415}
2416
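/* The frame length reported by the hardware includes the 4-byte FCS, and
 * the last buffer of a frame may have been filled past the real frame end
 * (see lan743x_rx_process_buffer()). Linearize the assembled skb and trim
 * it down to the payload length.
 */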
2417static struct sk_buff *
2418lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
2419{
2420	if (skb_linearize(skb)) {
2421		dev_kfree_skb_irq(skb);
2422		return NULL;
2423	}
2424	frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
2425	if (skb->len > frame_length) {
2426		skb->tail -= skb->len - frame_length;
2427		skb->len = frame_length;
2428	}
2429	return skb;
2430}
2431
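/* Process one RX descriptor, if available. The head writeback index tells
 * how far the hardware has advanced; a descriptor between last_head and
 * that index whose OWN bit is clear holds a received buffer. Buffers of a
 * multi-buffer frame are chained onto rx->skb_head via frag_list, an
 * optional extension descriptor supplies the hardware RX timestamp, and on
 * the last buffer the frame is trimmed and handed to napi_gro_receive().
 */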
2432static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
2433{
2434	int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
2435	struct lan743x_rx_descriptor *descriptor, *desc_ext;
2436	struct net_device *netdev = rx->adapter->netdev;
2437	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2438	struct lan743x_rx_buffer_info *buffer_info;
2439	int frame_length, buffer_length;
2440	bool is_ice, is_tce, is_icsm;
2441	int extension_index = -1;
2442	bool is_last, is_first;
2443	struct sk_buff *skb;
2444
2445	if (current_head_index < 0 || current_head_index >= rx->ring_size)
2446		goto done;
2447
2448	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
2449		goto done;
2450
2451	if (rx->last_head == current_head_index)
2452		goto done;
2453
2454	descriptor = &rx->ring_cpu_ptr[rx->last_head];
2455	if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
2456		goto done;
2457	buffer_info = &rx->buffer_info[rx->last_head];
2458
2459	is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
2460	is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;
2461
2462	if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
2463		/* extension is expected to follow */
2464		int index = lan743x_rx_next_index(rx, rx->last_head);
2465
2466		if (index == current_head_index)
2467			/* extension not yet available */
2468			goto done;
2469		desc_ext = &rx->ring_cpu_ptr[index];
2470		if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
2471			/* extension not yet available */
2472			goto done;
2473		if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
2474			goto move_forward;
2475		extension_index = index;
2476	}
2477
2478	/* Only the last buffer in a multi-buffer frame contains the total frame
2479	 * length. The chip occasionally sends more buffers than strictly
2480	 * required to reach the total frame length.
2481	 * Handle this by adding all buffers to the skb in their entirety.
2482	 * Once the real frame length is known, trim the skb.
2483	 */
2484	frame_length =
2485		RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
2486	buffer_length = buffer_info->buffer_length;
2487	is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
2488	is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
2489	is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;
2490
2491	netdev_dbg(netdev, "%s%schunk: %d/%d",
2492		   is_first ? "first " : "      ",
2493		   is_last  ? "last  " : "      ",
2494		   frame_length, buffer_length);
2495
2496	/* save the existing skb, then allocate a new skb and map it for DMA */
2497	skb = buffer_info->skb;
2498	if (lan743x_rx_init_ring_element(rx, rx->last_head,
2499					 GFP_ATOMIC | GFP_DMA)) {
2500		/* failed to allocate next skb.
2501		 * Memory is very low.
2502		 * Drop this packet and reuse buffer.
2503		 */
2504		lan743x_rx_reuse_ring_element(rx, rx->last_head);
2505		/* drop packet that was being assembled */
2506		dev_kfree_skb_irq(rx->skb_head);
2507		rx->skb_head = NULL;
2508		goto process_extension;
2509	}
2510
2511	/* add buffers to skb via skb->frag_list */
2512	if (is_first) {
2513		skb_reserve(skb, RX_HEAD_PADDING);
2514		skb_put(skb, buffer_length - RX_HEAD_PADDING);
2515		if (rx->skb_head)
2516			dev_kfree_skb_irq(rx->skb_head);
2517		rx->skb_head = skb;
2518	} else if (rx->skb_head) {
2519		skb_put(skb, buffer_length);
2520		if (skb_shinfo(rx->skb_head)->frag_list)
2521			rx->skb_tail->next = skb;
2522		else
2523			skb_shinfo(rx->skb_head)->frag_list = skb;
2524		rx->skb_tail = skb;
2525		rx->skb_head->len += skb->len;
2526		rx->skb_head->data_len += skb->len;
2527		rx->skb_head->truesize += skb->truesize;
2528	} else {
2529		/* packet to assemble has already been dropped because one or
2530		 * more of its buffers could not be allocated
2531		 */
2532		netdev_dbg(netdev, "drop buffer intended for dropped packet");
2533		dev_kfree_skb_irq(skb);
2534	}
2535
2536process_extension:
2537	if (extension_index >= 0) {
2538		u32 ts_sec;
2539		u32 ts_nsec;
2540
2541		ts_sec = le32_to_cpu(desc_ext->data1);
2542		ts_nsec = (le32_to_cpu(desc_ext->data2) &
2543			  RX_DESC_DATA2_TS_NS_MASK_);
2544		if (rx->skb_head)
2545			skb_hwtstamps(rx->skb_head)->hwtstamp =
2546				ktime_set(ts_sec, ts_nsec);
2547		lan743x_rx_reuse_ring_element(rx, extension_index);
2548		rx->last_head = extension_index;
2549		netdev_dbg(netdev, "process extension");
2550	}
2551
2552	if (is_last && rx->skb_head)
2553		rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);
2554
2555	if (is_last && rx->skb_head) {
2556		rx->skb_head->protocol = eth_type_trans(rx->skb_head,
2557							rx->adapter->netdev);
2558		if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
2559			if (!is_ice && !is_tce && !is_icsm)
2560				rx->skb_head->ip_summed = CHECKSUM_UNNECESSARY;
2561		}
2562		netdev_dbg(netdev, "sending %d byte frame to OS",
2563			   rx->skb_head->len);
2564		napi_gro_receive(&rx->napi, rx->skb_head);
2565		rx->skb_head = NULL;
2566	}
2567
2568move_forward:
2569	/* push tail and head forward */
2570	rx->last_tail = rx->last_head;
2571	rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
2572	result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
2573done:
2574	return result;
2575}
2576
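/* RX NAPI poll: consume up to the budget's worth of buffers from the
 * ring. The full budget is returned when it was used up, so NAPI keeps
 * polling; otherwise the poll is completed and the RX interrupt is
 * re-armed, which for some interrupt vector configurations requires
 * writing the enable flags into the RX tail register.
 */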
2577static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
2578{
2579	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
2580	struct lan743x_adapter *adapter = rx->adapter;
2581	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2582	u32 rx_tail_flags = 0;
2583	int count;
2584
2585	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
2586		/* clear int status bit before reading packet */
2587		lan743x_csr_write(adapter, DMAC_INT_STS,
2588				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2589	}
2590	for (count = 0; count < weight; count++) {
2591		result = lan743x_rx_process_buffer(rx);
2592		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
2593			break;
2594	}
2595	rx->frame_count += count;
2596	if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
2597		return weight;
2598
2599	if (!napi_complete_done(napi, count))
2600		return count;
2601
2602	/* re-arm interrupts, must write to rx tail on some chip variants */
2603	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2604		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
2605	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
2606		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
2607	} else {
2608		lan743x_csr_write(adapter, INT_EN_SET,
2609				  INT_BIT_DMA_RX_(rx->channel_number));
2610	}
2611
2612	if (rx_tail_flags)
2613		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2614				  rx_tail_flags | rx->last_tail);
2615
2616	return count;
2617}
2618
2619static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
2620{
2621	if (rx->buffer_info && rx->ring_cpu_ptr) {
2622		int index;
2623
2624		for (index = 0; index < rx->ring_size; index++)
2625			lan743x_rx_release_ring_element(rx, index);
2626	}
2627
2628	if (rx->head_cpu_ptr) {
2629		dma_free_coherent(&rx->adapter->pdev->dev,
2630				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
2631				  rx->head_dma_ptr);
2632		rx->head_cpu_ptr = NULL;
2633		rx->head_dma_ptr = 0;
2634	}
2635
2636	kfree(rx->buffer_info);
2637	rx->buffer_info = NULL;
2638
2639	if (rx->ring_cpu_ptr) {
2640		dma_free_coherent(&rx->adapter->pdev->dev,
2641				  rx->ring_allocation_size, rx->ring_cpu_ptr,
2642				  rx->ring_dma_ptr);
2643		rx->ring_allocation_size = 0;
2644		rx->ring_cpu_ptr = NULL;
2645		rx->ring_dma_ptr = 0;
2646	}
2647
2648	rx->ring_size = 0;
2649	rx->last_head = 0;
2650}
2651
2652static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2653{
2654	size_t ring_allocation_size = 0;
2655	dma_addr_t dma_ptr = 0;
2656	void *cpu_ptr = NULL;
2657	int ret = -ENOMEM;
2658	int index = 0;
2659
2660	rx->ring_size = LAN743X_RX_RING_SIZE;
2661	if (rx->ring_size <= 1) {
2662		ret = -EINVAL;
2663		goto cleanup;
2664	}
2665	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2666		ret = -EINVAL;
2667		goto cleanup;
2668	}
2669	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
2670				      DMA_BIT_MASK(64))) {
2671		dev_warn(&rx->adapter->pdev->dev,
2672			 "lan743x: No suitable DMA available\n");
2673		ret = -ENOMEM;
2674		goto cleanup;
2675	}
2676	ring_allocation_size = ALIGN(rx->ring_size *
2677				     sizeof(struct lan743x_rx_descriptor),
2678				     PAGE_SIZE);
2679	dma_ptr = 0;
2680	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2681				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2682	if (!cpu_ptr) {
2683		ret = -ENOMEM;
2684		goto cleanup;
2685	}
2686	rx->ring_allocation_size = ring_allocation_size;
2687	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2688	rx->ring_dma_ptr = dma_ptr;
2689
2690	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
2691			  GFP_KERNEL);
2692	if (!cpu_ptr) {
2693		ret = -ENOMEM;
2694		goto cleanup;
2695	}
2696	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2697	dma_ptr = 0;
2698	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2699				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
2700				     GFP_KERNEL);
2701	if (!cpu_ptr) {
2702		ret = -ENOMEM;
2703		goto cleanup;
2704	}
2705
2706	rx->head_cpu_ptr = cpu_ptr;
2707	rx->head_dma_ptr = dma_ptr;
2708	if (rx->head_dma_ptr & 0x3) {
2709		ret = -ENOMEM;
2710		goto cleanup;
2711	}
2712
2713	rx->last_head = 0;
2714	for (index = 0; index < rx->ring_size; index++) {
2715		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
2716		if (ret)
2717			goto cleanup;
2718	}
2719	return 0;
2720
2721cleanup:
2722	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
2723		   "Error allocating memory for LAN743x\n");
2724
2725	lan743x_rx_ring_cleanup(rx);
2726	return ret;
2727}
2728
2729static void lan743x_rx_close(struct lan743x_rx *rx)
2730{
2731	struct lan743x_adapter *adapter = rx->adapter;
2732
2733	lan743x_csr_write(adapter, FCT_RX_CTL,
2734			  FCT_RX_CTL_DIS_(rx->channel_number));
2735	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2736				 FCT_RX_CTL_EN_(rx->channel_number),
2737				 0, 1000, 20000, 100);
2738
2739	lan743x_csr_write(adapter, DMAC_CMD,
2740			  DMAC_CMD_STOP_R_(rx->channel_number));
2741	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
2742
2743	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
2744			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2745	lan743x_csr_write(adapter, INT_EN_CLR,
2746			  INT_BIT_DMA_RX_(rx->channel_number));
2747	napi_disable(&rx->napi);
2748
2749	netif_napi_del(&rx->napi);
2750
2751	lan743x_rx_ring_cleanup(rx);
2752}
2753
2754static int lan743x_rx_open(struct lan743x_rx *rx)
2755{
2756	struct lan743x_adapter *adapter = rx->adapter;
2757	u32 data = 0;
2758	int ret;
2759
2760	rx->frame_count = 0;
2761	ret = lan743x_rx_ring_init(rx);
2762	if (ret)
2763		goto return_error;
2764
2765	netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);
2766
2767	lan743x_csr_write(adapter, DMAC_CMD,
2768			  DMAC_CMD_RX_SWR_(rx->channel_number));
2769	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2770				 DMAC_CMD_RX_SWR_(rx->channel_number),
2771				 0, 1000, 20000, 100);
2772
2773	/* set ring base address */
2774	lan743x_csr_write(adapter,
2775			  RX_BASE_ADDRH(rx->channel_number),
2776			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
2777	lan743x_csr_write(adapter,
2778			  RX_BASE_ADDRL(rx->channel_number),
2779			  DMA_ADDR_LOW32(rx->ring_dma_ptr));
2780
2781	/* set rx write back address */
2782	lan743x_csr_write(adapter,
2783			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
2784			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
2785	lan743x_csr_write(adapter,
2786			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
2787			  DMA_ADDR_LOW32(rx->head_dma_ptr));
2788	data = RX_CFG_A_RX_HP_WB_EN_;
2789	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2790		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
2791			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
2792			RX_CFG_A_RX_PF_THRES_SET_(16) |
2793			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
2794	}
2795
2796	/* set RX_CFG_A */
2797	lan743x_csr_write(adapter,
2798			  RX_CFG_A(rx->channel_number), data);
2799
2800	/* set RX_CFG_B */
2801	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
2802	data &= ~RX_CFG_B_RX_PAD_MASK_;
2803	if (!RX_HEAD_PADDING)
2804		data |= RX_CFG_B_RX_PAD_0_;
2805	else
2806		data |= RX_CFG_B_RX_PAD_2_;
2807	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
2808	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
2809	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2810		data |= RX_CFG_B_RDMABL_512_;
2811
2812	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
2813	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2814							 INT_BIT_DMA_RX_
2815							 (rx->channel_number));
2816
2817	/* set RX_CFG_C */
2818	data = 0;
2819	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2820		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
2821	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2822		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
2823	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2824		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
2825	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2826		data |= RX_CFG_C_RX_INT_EN_R2C_;
2827	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
2828
2829	rx->last_tail = ((u32)(rx->ring_size - 1));
2830	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2831			  rx->last_tail);
2832	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
2833	if (rx->last_head) {
2834		ret = -EIO;
2835		goto napi_delete;
2836	}
2837
2838	napi_enable(&rx->napi);
2839
2840	lan743x_csr_write(adapter, INT_EN_SET,
2841			  INT_BIT_DMA_RX_(rx->channel_number));
2842	lan743x_csr_write(adapter, DMAC_INT_STS,
2843			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2844	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2845			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2846	lan743x_csr_write(adapter, DMAC_CMD,
2847			  DMAC_CMD_START_R_(rx->channel_number));
2848
2849	/* initialize fifo */
2850	lan743x_csr_write(adapter, FCT_RX_CTL,
2851			  FCT_RX_CTL_RESET_(rx->channel_number));
2852	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2853				 FCT_RX_CTL_RESET_(rx->channel_number),
2854				 0, 1000, 20000, 100);
2855	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
2856			  FCT_FLOW_CTL_REQ_EN_ |
2857			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
2858			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
2859
2860	/* enable fifo */
2861	lan743x_csr_write(adapter, FCT_RX_CTL,
2862			  FCT_RX_CTL_EN_(rx->channel_number));
2863	return 0;
2864
2865napi_delete:
2866	netif_napi_del(&rx->napi);
2867	lan743x_rx_ring_cleanup(rx);
2868
2869return_error:
2870	return ret;
2871}
2872
2873static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter)
2874{
2875	u32 sgmii_ctl;
2876	int ret;
2877
2878	ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
2879			      MASTER_SLAVE_STATE_MASTER);
2880	if (ret < 0) {
2881		netif_err(adapter, drv, adapter->netdev,
2882			  "invalid link-speed-duplex (LSD), error %d\n", ret);
2883		return ret;
2884	}
2885
2886	adapter->sgmii_lsd = ret;
2887	netif_dbg(adapter, drv, adapter->netdev,
2888		  "Link Speed Duplex (LSD): 0x%X\n", adapter->sgmii_lsd);
2889
2890	/* LINK_STATUS_SOURCE from the External PHY via SGMII */
2891	sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
2892	sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_;
2893	lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
2894
2895	ret = lan743x_serdes_clock_and_aneg_update(adapter);
2896	if (ret < 0) {
2897		netif_err(adapter, drv, adapter->netdev,
2898			  "error %d sgmii aneg update failed\n", ret);
2899		return ret;
2900	}
2901
2902	return lan743x_pcs_power_reset(adapter);
2903}
2904
2905static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter)
2906{
2907	u32 sgmii_ctl;
2908	int ret;
2909
2910	ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
2911			      MASTER_SLAVE_STATE_MASTER);
2912	if (ret < 0) {
2913		netif_err(adapter, drv, adapter->netdev,
2914			  "invalid link-speed-duplex (LSD), error %d\n", ret);
2915		return ret;
2916	}
2917
2918	adapter->sgmii_lsd = ret;
2919	netif_dbg(adapter, drv, adapter->netdev,
2920		  "Link Speed Duplex (LSD): 0x%X\n", adapter->sgmii_lsd);
2921
2922	/* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */
2923	sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
2924	sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
2925	lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
2926
2927	ret = lan743x_serdes_clock_and_aneg_update(adapter);
2928	if (ret < 0) {
2929		netif_err(adapter, drv, adapter->netdev,
2930			  "error %d 1000basex aneg update failed\n", ret);
2931		return ret;
2932	}
2933
2934	return lan743x_pcs_power_reset(adapter);
2935}
2936
2937static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
2938{
2939	u32 sgmii_ctl;
2940	int ret;
2941
2942	ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL,
2943			      MASTER_SLAVE_STATE_MASTER);
2944	if (ret < 0) {
2945		netif_err(adapter, drv, adapter->netdev,
2946			  "invalid link-speed-duplex (LSD), error %d\n", ret);
2947		return ret;
2948	}
2949
2950	adapter->sgmii_lsd = ret;
2951	netif_dbg(adapter, drv, adapter->netdev,
2952		  "Link Speed Duplex (LSD): 0x%X\n", adapter->sgmii_lsd);
2953
2954	/* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */
2955	sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
2956	sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
2957	lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
2958
2959	ret = lan743x_serdes_clock_and_aneg_update(adapter);
2960	if (ret < 0) {
2961		netif_err(adapter, drv, adapter->netdev,
2962			  "error %d 2500basex aneg update failed\n", ret);
2963		return ret;
2964	}
2965
2966	return lan743x_pcs_power_reset(adapter);
2967}
2968
2969void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
2970{
2971	u32 mac_cr;
2972
2973	mac_cr = lan743x_csr_read(adapter, MAC_CR);
2974	if (enable)
2975		mac_cr |= MAC_CR_EEE_EN_;
2976	else
2977		mac_cr &= ~MAC_CR_EEE_EN_;
2978	lan743x_csr_write(adapter, MAC_CR, mac_cr);
2979}
2980
2981static void lan743x_phylink_mac_config(struct phylink_config *config,
2982				       unsigned int link_an_mode,
2983				       const struct phylink_link_state *state)
2984{
2985	struct net_device *netdev = to_net_dev(config->dev);
2986	struct lan743x_adapter *adapter = netdev_priv(netdev);
2987	int ret;
2988
2989	switch (state->interface) {
2990	case PHY_INTERFACE_MODE_2500BASEX:
2991		ret = lan743x_phylink_2500basex_config(adapter);
2992		if (ret < 0)
2993			netif_err(adapter, drv, adapter->netdev,
2994				  "2500BASEX config failed. Error %d\n", ret);
2995		else
2996			netif_dbg(adapter, drv, adapter->netdev,
2997				  "2500BASEX mode selected and configured\n");
2998		break;
2999	case PHY_INTERFACE_MODE_1000BASEX:
3000		ret = lan743x_phylink_1000basex_config(adapter);
3001		if (ret < 0)
3002			netif_err(adapter, drv, adapter->netdev,
3003				  "1000BASEX config failed. Error %d\n", ret);
3004		else
3005			netif_dbg(adapter, drv, adapter->netdev,
3006				  "1000BASEX mode selected and configured\n");
3007		break;
3008	case PHY_INTERFACE_MODE_SGMII:
3009		ret = lan743x_phylink_sgmii_config(adapter);
3010		if (ret < 0)
3011			netif_err(adapter, drv, adapter->netdev,
3012				  "SGMII config failed. Error %d\n", ret);
3013		else
3014			netif_dbg(adapter, drv, adapter->netdev,
3015				  "SGMII mode selected and configured\n");
3016		break;
3017	default:
3018		netif_dbg(adapter, drv, adapter->netdev,
3019			  "RGMII/GMII/MII (0x%X) mode enabled\n",
3020			  state->interface);
3021		break;
3022	}
3023}
3024
3025static void lan743x_phylink_mac_link_down(struct phylink_config *config,
3026					  unsigned int link_an_mode,
3027					  phy_interface_t interface)
3028{
3029	struct net_device *netdev = to_net_dev(config->dev);
3030	struct lan743x_adapter *adapter = netdev_priv(netdev);
3031
3032	netif_tx_stop_all_queues(netdev);
3033	lan743x_mac_eee_enable(adapter, false);
3034}
3035
3036static void lan743x_phylink_mac_link_up(struct phylink_config *config,
3037					struct phy_device *phydev,
3038					unsigned int link_an_mode,
3039					phy_interface_t interface,
3040					int speed, int duplex,
3041					bool tx_pause, bool rx_pause)
3042{
3043	struct net_device *netdev = to_net_dev(config->dev);
3044	struct lan743x_adapter *adapter = netdev_priv(netdev);
3045	int mac_cr;
3046	u8 cap;
3047
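	/* MAC_CR speed encoding: CFG_H/CFG_L both cleared selects 10 Mbps,
	 * CFG_L alone 100 Mbps, CFG_H alone 1 Gbps, and both bits 2.5 Gbps.
	 */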
3048	mac_cr = lan743x_csr_read(adapter, MAC_CR);
3049	/* Pre-initialize register bits.
3050	 * Resulting value corresponds to SPEED_10
3051	 */
3052	mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);
3053	if (speed == SPEED_2500)
3054		mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
3055	else if (speed == SPEED_1000)
3056		mac_cr |= MAC_CR_CFG_H_;
3057	else if (speed == SPEED_100)
3058		mac_cr |= MAC_CR_CFG_L_;
3059
3060	lan743x_csr_write(adapter, MAC_CR, mac_cr);
3061
3062	lan743x_ptp_update_latency(adapter, speed);
3063
3064	/* Flow Control operation */
3065	cap = 0;
3066	if (tx_pause)
3067		cap |= FLOW_CTRL_TX;
3068	if (rx_pause)
3069		cap |= FLOW_CTRL_RX;
3070
3071	lan743x_mac_flow_ctrl_set_enables(adapter,
3072					  cap & FLOW_CTRL_TX,
3073					  cap & FLOW_CTRL_RX);
3074
3075	if (phydev)
3076		lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
3077
3078	netif_tx_wake_all_queues(netdev);
3079}
3080
3081static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
3082	.mac_config = lan743x_phylink_mac_config,
3083	.mac_link_down = lan743x_phylink_mac_link_down,
3084	.mac_link_up = lan743x_phylink_mac_link_up,
3085};
3086
3087static int lan743x_phylink_create(struct lan743x_adapter *adapter)
3088{
3089	struct net_device *netdev = adapter->netdev;
3090	struct phylink *pl;
3091
3092	adapter->phylink_config.dev = &netdev->dev;
3093	adapter->phylink_config.type = PHYLINK_NETDEV;
3094	adapter->phylink_config.mac_managed_pm = false;
3095
3096	adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
3097		MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
3098
3099	lan743x_phy_interface_select(adapter);
3100
3101	switch (adapter->phy_interface) {
3102	case PHY_INTERFACE_MODE_SGMII:
3103		__set_bit(PHY_INTERFACE_MODE_SGMII,
3104			  adapter->phylink_config.supported_interfaces);
3105		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
3106			  adapter->phylink_config.supported_interfaces);
3107		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
3108			  adapter->phylink_config.supported_interfaces);
3109		adapter->phylink_config.mac_capabilities |= MAC_2500FD;
3110		break;
3111	case PHY_INTERFACE_MODE_GMII:
3112		__set_bit(PHY_INTERFACE_MODE_GMII,
3113			  adapter->phylink_config.supported_interfaces);
3114		break;
3115	case PHY_INTERFACE_MODE_MII:
3116		__set_bit(PHY_INTERFACE_MODE_MII,
3117			  adapter->phylink_config.supported_interfaces);
3118		break;
3119	default:
3120		phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
3121	}
3122
3123	pl = phylink_create(&adapter->phylink_config, NULL,
3124			    adapter->phy_interface, &lan743x_phylink_mac_ops);
3125
3126	if (IS_ERR(pl)) {
3127		netdev_err(netdev, "Could not create phylink (%pe)\n", pl);
3128		return PTR_ERR(pl);
3129	}
3130
3131	adapter->phylink = pl;
3132	netdev_dbg(netdev, "lan743x phylink created");
3133
3134	return 0;
3135}
3136
3137static bool lan743x_phy_handle_exists(struct device_node *dn)
3138{
3139	dn = of_parse_phandle(dn, "phy-handle", 0);
3140	of_node_put(dn);
3141	return dn != NULL;
3142}
3143
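/* Attach a PHY for phylink: prefer the devicetree phy-handle, fall back to
 * the first PHY found on the adapter's MDIO bus, and finally (on LAN7431
 * and PCI11x1x parts, which may use a direct SERDES link with no PHY)
 * register a fixed link at the highest full-duplex speed the MAC supports.
 */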
3144static int lan743x_phylink_connect(struct lan743x_adapter *adapter)
3145{
3146	struct device_node *dn = adapter->pdev->dev.of_node;
3147	struct net_device *dev = adapter->netdev;
3148	struct phy_device *phydev;
3149	int ret;
3150
3151	if (dn)
3152		ret = phylink_of_phy_connect(adapter->phylink, dn, 0);
3153
3154	if (!dn || (ret && !lan743x_phy_handle_exists(dn))) {
3155		phydev = phy_find_first(adapter->mdiobus);
3156		if (phydev) {
3157			/* attach the mac to the phy */
3158			ret = phylink_connect_phy(adapter->phylink, phydev);
3159		} else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
3160			      ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) {
3161			struct phylink_link_state state;
3162			unsigned long caps;
3163
3164			caps = adapter->phylink_config.mac_capabilities;
3165			if (caps & MAC_2500FD) {
3166				state.speed = SPEED_2500;
3167				state.duplex = DUPLEX_FULL;
3168			} else if (caps & MAC_1000FD) {
3169				state.speed = SPEED_1000;
3170				state.duplex = DUPLEX_FULL;
3171			} else {
3172				state.speed = SPEED_UNKNOWN;
3173				state.duplex = DUPLEX_UNKNOWN;
3174			}
3175
3176			ret = phylink_set_fixed_link(adapter->phylink, &state);
3177			if (ret) {
3178				netdev_err(dev, "Could not set fixed link\n");
3179				return ret;
3180			}
3181		} else {
3182			netdev_err(dev, "no PHY found\n");
3183			return -ENXIO;
3184		}
3185	}
3186
3187	if (ret) {
3188		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
3189		return ret;
3190	}
3191
3192	phylink_start(adapter->phylink);
3193
3194	return 0;
3195}
3196
3197static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter)
3198{
3199	phylink_stop(adapter->phylink);
3200	phylink_disconnect_phy(adapter->phylink);
3201}
3202
3203static int lan743x_netdev_close(struct net_device *netdev)
3204{
3205	struct lan743x_adapter *adapter = netdev_priv(netdev);
3206	int index;
3207
3208	for (index = 0; index < adapter->used_tx_channels; index++)
3209		lan743x_tx_close(&adapter->tx[index]);
3210
3211	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
3212		lan743x_rx_close(&adapter->rx[index]);
3213
3214	lan743x_ptp_close(adapter);
3215
3216	lan743x_phylink_disconnect(adapter);
3217
3218	lan743x_mac_close(adapter);
3219
3220	lan743x_intr_close(adapter);
3221
3222	return 0;
3223}
3224
3225static int lan743x_netdev_open(struct net_device *netdev)
3226{
3227	struct lan743x_adapter *adapter = netdev_priv(netdev);
3228	int index;
3229	int ret;
3230
3231	ret = lan743x_intr_open(adapter);
3232	if (ret)
3233		goto return_error;
3234
3235	ret = lan743x_mac_open(adapter);
3236	if (ret)
3237		goto close_intr;
3238
3239	ret = lan743x_phylink_connect(adapter);
3240	if (ret)
3241		goto close_mac;
3242
3243	ret = lan743x_ptp_open(adapter);
3244	if (ret)
3245		goto close_mac;
3246
3247	lan743x_rfe_open(adapter);
3248
3249	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3250		ret = lan743x_rx_open(&adapter->rx[index]);
3251		if (ret)
3252			goto close_rx;
3253	}
3254
3255	for (index = 0; index < adapter->used_tx_channels; index++) {
3256		ret = lan743x_tx_open(&adapter->tx[index]);
3257		if (ret)
3258			goto close_tx;
3259	}
3260
3261	if (netdev->phydev)
3262		phy_support_eee(netdev->phydev);
3263
3264#ifdef CONFIG_PM
3265	if (adapter->netdev->phydev) {
3266		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3267
3268		phy_ethtool_get_wol(netdev->phydev, &wol);
3269		adapter->phy_wol_supported = wol.supported;
3270		adapter->phy_wolopts = wol.wolopts;
3271	}
3272#endif
3273
3274	return 0;
3275
3276close_tx:
3277	for (index = 0; index < adapter->used_tx_channels; index++) {
3278		if (adapter->tx[index].ring_cpu_ptr)
3279			lan743x_tx_close(&adapter->tx[index]);
3280	}
3281
3282close_rx:
3283	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3284		if (adapter->rx[index].ring_cpu_ptr)
3285			lan743x_rx_close(&adapter->rx[index]);
3286	}
3287	lan743x_ptp_close(adapter);
3288	if (adapter->phylink)
3289		lan743x_phylink_disconnect(adapter);
3290
3291close_mac:
3292	lan743x_mac_close(adapter);
3293
3294close_intr:
3295	lan743x_intr_close(adapter);
3296
3297return_error:
3298	netif_warn(adapter, ifup, adapter->netdev,
3299		   "Error opening LAN743x\n");
3300	return ret;
3301}
3302
3303static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
3304					     struct net_device *netdev)
3305{
3306	struct lan743x_adapter *adapter = netdev_priv(netdev);
3307	u8 ch = 0;
3308
3309	if (adapter->is_pci11x1x)
3310		ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
3311
3312	return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
3313}
3314
3315static int lan743x_netdev_ioctl(struct net_device *netdev,
3316				struct ifreq *ifr, int cmd)
3317{
3318	struct lan743x_adapter *adapter = netdev_priv(netdev);
3319
3320	if (!netif_running(netdev))
3321		return -EINVAL;
3322	if (cmd == SIOCSHWTSTAMP)
3323		return lan743x_ptp_ioctl(netdev, ifr, cmd);
3324
3325	return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
3326}
3327
3328static void lan743x_netdev_set_multicast(struct net_device *netdev)
3329{
3330	struct lan743x_adapter *adapter = netdev_priv(netdev);
3331
3332	lan743x_rfe_set_multicast(adapter);
3333}
3334
3335static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
3336{
3337	struct lan743x_adapter *adapter = netdev_priv(netdev);
3338	int ret = 0;
3339
3340	ret = lan743x_mac_set_mtu(adapter, new_mtu);
3341	if (!ret)
3342		WRITE_ONCE(netdev->mtu, new_mtu);
3343	return ret;
3344}
3345
3346static void lan743x_netdev_get_stats64(struct net_device *netdev,
3347				       struct rtnl_link_stats64 *stats)
3348{
3349	struct lan743x_adapter *adapter = netdev_priv(netdev);
3350
3351	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
3352	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
3353	stats->rx_bytes = lan743x_csr_read(adapter,
3354					   STAT_RX_UNICAST_BYTE_COUNT) +
3355			  lan743x_csr_read(adapter,
3356					   STAT_RX_BROADCAST_BYTE_COUNT) +
3357			  lan743x_csr_read(adapter,
3358					   STAT_RX_MULTICAST_BYTE_COUNT);
3359	stats->tx_bytes = lan743x_csr_read(adapter,
3360					   STAT_TX_UNICAST_BYTE_COUNT) +
3361			  lan743x_csr_read(adapter,
3362					   STAT_TX_BROADCAST_BYTE_COUNT) +
3363			  lan743x_csr_read(adapter,
3364					   STAT_TX_MULTICAST_BYTE_COUNT);
3365	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
3366			   lan743x_csr_read(adapter,
3367					    STAT_RX_ALIGNMENT_ERRORS) +
3368			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
3369			   lan743x_csr_read(adapter,
3370					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
3371			   lan743x_csr_read(adapter,
3372					    STAT_RX_OVERSIZE_FRAME_ERRORS);
3373	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
3374			   lan743x_csr_read(adapter,
3375					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
3376			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
3377	stats->rx_dropped = lan743x_csr_read(adapter,
3378					     STAT_RX_DROPPED_FRAMES);
3379	stats->tx_dropped = lan743x_csr_read(adapter,
3380					     STAT_TX_EXCESSIVE_COLLISION);
3381	stats->multicast = lan743x_csr_read(adapter,
3382					    STAT_RX_MULTICAST_FRAMES) +
3383			   lan743x_csr_read(adapter,
3384					    STAT_TX_MULTICAST_FRAMES);
3385	stats->collisions = lan743x_csr_read(adapter,
3386					     STAT_TX_SINGLE_COLLISIONS) +
3387			    lan743x_csr_read(adapter,
3388					     STAT_TX_MULTIPLE_COLLISIONS) +
3389			    lan743x_csr_read(adapter,
3390					     STAT_TX_LATE_COLLISIONS);
3391}
3392
3393static int lan743x_netdev_set_mac_address(struct net_device *netdev,
3394					  void *addr)
3395{
3396	struct lan743x_adapter *adapter = netdev_priv(netdev);
3397	struct sockaddr *sock_addr = addr;
3398	int ret;
3399
3400	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
3401	if (ret)
3402		return ret;
3403	eth_hw_addr_set(netdev, sock_addr->sa_data);
3404	lan743x_mac_set_address(adapter, sock_addr->sa_data);
3405	lan743x_rfe_update_mac_address(adapter);
3406	return 0;
3407}
3408
3409static const struct net_device_ops lan743x_netdev_ops = {
3410	.ndo_open		= lan743x_netdev_open,
3411	.ndo_stop		= lan743x_netdev_close,
3412	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
3413	.ndo_eth_ioctl		= lan743x_netdev_ioctl,
3414	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
3415	.ndo_change_mtu		= lan743x_netdev_change_mtu,
3416	.ndo_get_stats64	= lan743x_netdev_get_stats64,
3417	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
3418};
3419
3420static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
3421{
3422	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
3423}
3424
3425static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
3426{
3427	mdiobus_unregister(adapter->mdiobus);
3428}
3429
3430static void lan743x_destroy_phylink(struct lan743x_adapter *adapter)
3431{
3432	phylink_destroy(adapter->phylink);
3433	adapter->phylink = NULL;
3434}
3435
3436static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
3437{
3438	unregister_netdev(adapter->netdev);
3439
3440	lan743x_destroy_phylink(adapter);
3441	lan743x_mdiobus_cleanup(adapter);
3442	lan743x_hardware_cleanup(adapter);
3443	lan743x_pci_cleanup(adapter);
3444}
3445
3446static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
3447{
3448	u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
3449
3450	if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
3451		u32 misc_ctl;
3452
3453		misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
3454		misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
3455		misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
3456				       RFE_RD_FIFO_TH_3_DWORDS);
3457		lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
3458	}
3459}
3460
3461static int lan743x_hardware_init(struct lan743x_adapter *adapter,
3462				 struct pci_dev *pdev)
3463{
3464	struct lan743x_tx *tx;
3465	int index;
3466	int ret;
3467
3468	adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
3469	if (adapter->is_pci11x1x) {
3470		adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
3471		adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
3472		adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
3473		pci11x1x_strap_get_status(adapter);
3474		spin_lock_init(&adapter->eth_syslock_spinlock);
3475		mutex_init(&adapter->sgmii_rw_lock);
3476		pci11x1x_set_rfe_rd_fifo_threshold(adapter);
3477	} else {
3478		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
3479		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
3480		adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
3481	}
3482
3483	adapter->intr.irq = adapter->pdev->irq;
3484	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
3485
3486	ret = lan743x_gpio_init(adapter);
3487	if (ret)
3488		return ret;
3489
3490	ret = lan743x_mac_init(adapter);
3491	if (ret)
3492		return ret;
3493
3494	ret = lan743x_phy_init(adapter);
3495	if (ret)
3496		return ret;
3497
3498	ret = lan743x_ptp_init(adapter);
3499	if (ret)
3500		return ret;
3501
3502	lan743x_rfe_update_mac_address(adapter);
3503
3504	ret = lan743x_dmac_init(adapter);
3505	if (ret)
3506		return ret;
3507
3508	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3509		adapter->rx[index].adapter = adapter;
3510		adapter->rx[index].channel_number = index;
3511	}
3512
3513	for (index = 0; index < adapter->used_tx_channels; index++) {
3514		tx = &adapter->tx[index];
3515		tx->adapter = adapter;
3516		tx->channel_number = index;
3517		spin_lock_init(&tx->ring_lock);
3518	}
3519
3520	return 0;
3521}
3522
3523static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
3524{
3525	u32 sgmii_ctl;
3526	int ret;
3527
3528	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
3529	if (!(adapter->mdiobus)) {
3530		ret = -ENOMEM;
3531		goto return_error;
3532	}
3533
3534	adapter->mdiobus->priv = (void *)adapter;
3535	if (adapter->is_pci11x1x) {
3536		if (adapter->is_sgmii_en) {
3537			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
3538			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
3539			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
3540			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
3541			netif_dbg(adapter, drv, adapter->netdev,
3542				  "SGMII operation\n");
3543			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3544			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3545			adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
3546			adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
3547			adapter->mdiobus->name = "lan743x-mdiobus-c45";
3548			netif_dbg(adapter, drv, adapter->netdev,
3549				  "lan743x-mdiobus-c45\n");
3550		} else {
3551			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
3552			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
3553			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
3554			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
3555			netif_dbg(adapter, drv, adapter->netdev,
3556				  "RGMII operation\n");
3557			/* Only Clause-22 (C22) access is supported on the RGMII interface */
3558			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3559			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3560			adapter->mdiobus->name = "lan743x-mdiobus";
3561			netif_dbg(adapter, drv, adapter->netdev,
3562				  "lan743x-mdiobus\n");
3563		}
3564	} else {
3565		adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3566		adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3567		adapter->mdiobus->name = "lan743x-mdiobus";
3568		netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
3569	}
3570
3571	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
3572		 "pci-%s", pci_name(adapter->pdev));
3573
3574	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
3575		/* LAN7430 uses internal phy at address 1 */
3576		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
3577
3578	/* register mdiobus */
3579	ret = mdiobus_register(adapter->mdiobus);
3580	if (ret < 0)
3581		goto return_error;
3582	return 0;
3583
3584return_error:
3585	return ret;
3586}
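/* Illustrative sketch, not part of the driver: mii_bus::phy_mask is a bitmap
 * of PHY addresses to skip during the bus scan, so the ~BIT(1) above leaves
 * only address 1 (the LAN7430 internal PHY) to be probed.  The helper name
 * below is hypothetical.
 */
static void example_limit_mdio_scan(struct mii_bus *bus, unsigned int addr)
{
	/* a set bit means "do not probe this address" (addresses 0 to 31) */
	bus->phy_mask = ~(u32)BIT(addr);
}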
3587
3588/** lan743x_pcidev_probe - Device Initialization Routine
3589 * @pdev: PCI device information struct
3590 * @id: entry in lan743x_pci_tbl
3591 *
3592 * Returns 0 on success, or a negative error code on failure
3593 *
3594 * Initializes an adapter identified by a pci_dev structure.
3595 * The OS initialization, configuration of the adapter private structure,
3596 * and a hardware reset all occur here.
3597 **/
3598static int lan743x_pcidev_probe(struct pci_dev *pdev,
3599				const struct pci_device_id *id)
3600{
3601	struct lan743x_adapter *adapter = NULL;
3602	struct net_device *netdev = NULL;
3603	int ret = -ENODEV;
3604
3605	if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
3606	    id->device == PCI_DEVICE_ID_SMSC_A041) {
3607		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
3608						 sizeof(struct lan743x_adapter),
3609						 PCI11X1X_USED_TX_CHANNELS,
3610						 LAN743X_USED_RX_CHANNELS);
3611	} else {
3612		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
3613						 sizeof(struct lan743x_adapter),
3614						 LAN743X_USED_TX_CHANNELS,
3615						 LAN743X_USED_RX_CHANNELS);
3616	}
3617
3618	if (!netdev)
3619		goto return_error;
3620
3621	SET_NETDEV_DEV(netdev, &pdev->dev);
3622	pci_set_drvdata(pdev, netdev);
3623	adapter = netdev_priv(netdev);
3624	adapter->netdev = netdev;
3625	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
3626			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
3627			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
3628	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
3629
3630	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);
3631
3632	ret = lan743x_pci_init(adapter, pdev);
3633	if (ret)
3634		goto return_error;
3635
3636	ret = lan743x_csr_init(adapter);
3637	if (ret)
3638		goto cleanup_pci;
3639
3640	ret = lan743x_hardware_init(adapter, pdev);
3641	if (ret)
3642		goto cleanup_pci;
3643
3644	ret = lan743x_mdiobus_init(adapter);
3645	if (ret)
3646		goto cleanup_hardware;
3647
3648	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
3649	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
3650	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
3651				    NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3652	adapter->netdev->hw_features = adapter->netdev->features;
3653
3654	ret = lan743x_phylink_create(adapter);
3655	if (ret < 0) {
3656		netif_err(adapter, probe, netdev,
3657			  "failed to setup phylink (%d)\n", ret);
3658		goto cleanup_mdiobus;
3659	}
3660
3661	ret = register_netdev(adapter->netdev);
3662	if (ret < 0)
3663		goto cleanup_phylink;
3664	return 0;
3665
3666cleanup_phylink:
3667	lan743x_destroy_phylink(adapter);
3668
3669cleanup_mdiobus:
3670	lan743x_mdiobus_cleanup(adapter);
3671
3672cleanup_hardware:
3673	lan743x_hardware_cleanup(adapter);
3674
3675cleanup_pci:
3676	lan743x_pci_cleanup(adapter);
3677
3678return_error:
3679	pr_warn("Initialization failed\n");
3680	return ret;
3681}
3682
3683/**
3684 * lan743x_pcidev_remove - Device Removal Routine
3685 * @pdev: PCI device information struct
3686 *
3687 * This is called by the PCI subsystem to alert the driver
3688 * that it should release a PCI device.  This could be caused by a
3689 * Hot-Plug event, or because the driver is going to be removed from
3690 * memory.
3691 **/
3692static void lan743x_pcidev_remove(struct pci_dev *pdev)
3693{
3694	struct net_device *netdev = pci_get_drvdata(pdev);
3695	struct lan743x_adapter *adapter = netdev_priv(netdev);
3696
3697	lan743x_full_cleanup(adapter);
3698}
3699
3700static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
3701{
3702	struct net_device *netdev = pci_get_drvdata(pdev);
3703	struct lan743x_adapter *adapter = netdev_priv(netdev);
3704
3705	rtnl_lock();
3706	netif_device_detach(netdev);
3707
3708	/* Close the netdev only when it is in the running state.
3709	 * This is the case when the system suspends via pm-suspend,
3710	 * but not when it suspends from the GUI suspend menu.
3711	 */
3712	if (netif_running(netdev))
3713		lan743x_netdev_close(netdev);
3714	rtnl_unlock();
3715
3716#ifdef CONFIG_PM
3717	pci_save_state(pdev);
3718#endif
3719
3720	/* clean up lan743x portion */
3721	lan743x_hardware_cleanup(adapter);
3722}
3723
3724#ifdef CONFIG_PM_SLEEP
3725static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
3726{
3727	return bitrev16(crc16(0xFFFF, buf, len));
3728}
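/* Illustrative sketch, not part of the driver: the wake-up frame filters
 * configured below match on a CRC-16 of selected frame bytes.  The helper
 * above seeds crc16() (<linux/crc16.h>) with 0xFFFF and bit-reverses the
 * result with bitrev16() (<linux/bitrev.h>) to match the controller's bit
 * ordering.  The function name below is hypothetical.
 */
static u16 example_ipv4_mcast_wuf_crc(void)
{
	static const u8 ipv4_mcast_prefix[3] = { 0x01, 0x00, 0x5E };

	/* same computation the driver performs for its IPv4 multicast filter */
	return lan743x_pm_wakeframe_crc16(ipv4_mcast_prefix,
					  sizeof(ipv4_mcast_prefix));
}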
3729
3730static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
3731{
3732	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3733	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3734	const u8 arp_type[2] = { 0x08, 0x06 };
3735	int mask_index;
3736	u32 sopass;
3737	u32 pmtctl;
3738	u32 wucsr;
3739	u32 macrx;
3740	u16 crc;
3741
3742	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
3743		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
3744
3745	/* clear wake settings */
3746	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
3747	pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
3748	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
3749		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
3750		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
3751
3752	macrx = lan743x_csr_read(adapter, MAC_RX);
3753
3754	wucsr = 0;
3755	mask_index = 0;
3756
3757	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
3758
3759	if (adapter->phy_wolopts)
3760		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
3761
3762	if (adapter->wolopts & WAKE_MAGIC) {
3763		wucsr |= MAC_WUCSR_MPEN_;
3764		macrx |= MAC_RX_RXEN_;
3765		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3766	}
3767	if (adapter->wolopts & WAKE_UCAST) {
3768		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
3769		macrx |= MAC_RX_RXEN_;
3770		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3771		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3772	}
3773	if (adapter->wolopts & WAKE_BCAST) {
3774		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
3775		macrx |= MAC_RX_RXEN_;
3776		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3777		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3778	}
3779	if (adapter->wolopts & WAKE_MCAST) {
3780		/* IPv4 multicast */
3781		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
3782		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3783				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3784				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3785				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3786		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
3787		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3788		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3789		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3790		mask_index++;
3791
3792		/* IPv6 multicast */
3793		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
3794		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3795				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3796				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3797				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3798		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
3799		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3800		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3801		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3802		mask_index++;
3803
3804		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3805		macrx |= MAC_RX_RXEN_;
3806		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3807		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3808	}
3809	if (adapter->wolopts & WAKE_ARP) {
3810		/* set MAC_WUF_CFG & WUF_MASK
3811		 * for packet type (offset 12,13) = ARP (0x0806)
3812		 */
3813		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
3814		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3815				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
3816				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3817				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3818		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
3819		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3820		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3821		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3822		mask_index++;
3823
3824		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3825		macrx |= MAC_RX_RXEN_;
3826		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3827		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3828	}
3829
3830	if (adapter->wolopts & WAKE_MAGICSECURE) {
3831		sopass = *(u32 *)adapter->sopass;
3832		lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
3833		sopass = *(u16 *)&adapter->sopass[4];
3834		lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
3835		wucsr |= MAC_MP_SO_EN_;
3836	}
3837
3838	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
3839	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
3840	lan743x_csr_write(adapter, MAC_RX, macrx);
3841}
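/* Illustrative sketch, not part of the driver: the MAC_WUF_MASKn values
 * written above appear to form a per-byte bitmap - bit N selects byte N,
 * counted from the configured offset, for the CRC-16 comparison.  That is
 * consistent with mask 0x7 covering the 3-byte IPv4 multicast prefix and
 * mask 0x3000 covering the EtherType at bytes 12-13 for ARP.  The helper
 * below is hypothetical and only shows how such a mask could be built.
 */
static void example_build_wuf_mask(u32 mask[4], unsigned int first_byte,
				   unsigned int num_bytes)
{
	unsigned int i;

	memset(mask, 0, 4 * sizeof(u32));
	for (i = first_byte; i < first_byte + num_bytes && i < 128; i++)
		mask[i / 32] |= BIT(i % 32);
	/* first_byte=0,  num_bytes=3 -> mask[0] = 0x00000007 (IPv4 mcast) */
	/* first_byte=12, num_bytes=2 -> mask[0] = 0x00003000 (ARP)        */
}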
3842
3843static int lan743x_pm_suspend(struct device *dev)
3844{
3845	struct pci_dev *pdev = to_pci_dev(dev);
3846	struct net_device *netdev = pci_get_drvdata(pdev);
3847	struct lan743x_adapter *adapter = netdev_priv(netdev);
3848	u32 data;
3849
3850	lan743x_pcidev_shutdown(pdev);
3851
3852	/* clear all wakes */
3853	lan743x_csr_write(adapter, MAC_WUCSR, 0);
3854	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
3855	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
3856
3857	if (adapter->wolopts || adapter->phy_wolopts)
3858		lan743x_pm_set_wol(adapter);
3859
3860	if (adapter->is_pci11x1x) {
3861		/* Save HW_CFG so it can be restored during PM resume */
3862		data = lan743x_csr_read(adapter, HW_CFG);
3863		adapter->hw_cfg = data;
3864		data |= (HW_CFG_RST_PROTECT_PCIE_ |
3865			 HW_CFG_D3_RESET_DIS_ |
3866			 HW_CFG_D3_VAUX_OVR_ |
3867			 HW_CFG_HOT_RESET_DIS_ |
3868			 HW_CFG_RST_PROTECT_);
3869		lan743x_csr_write(adapter, HW_CFG, data);
3870	}
3871
3872	/* Host sets PME_En and puts the device into D3hot */
3873	return pci_prepare_to_sleep(pdev);
3874}
3875
3876static int lan743x_pm_resume(struct device *dev)
3877{
3878	struct pci_dev *pdev = to_pci_dev(dev);
3879	struct net_device *netdev = pci_get_drvdata(pdev);
3880	struct lan743x_adapter *adapter = netdev_priv(netdev);
3881	u32 data;
3882	int ret;
3883
3884	pci_set_power_state(pdev, PCI_D0);
3885	pci_restore_state(pdev);
3886	pci_save_state(pdev);
3887
3888	/* Restore HW_CFG that was saved during pm suspend */
3889	if (adapter->is_pci11x1x)
3890		lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);
3891
3892	ret = lan743x_hardware_init(adapter, pdev);
3893	if (ret) {
3894		netif_err(adapter, probe, adapter->netdev,
3895			  "lan743x_hardware_init returned %d\n", ret);
3896		lan743x_pci_cleanup(adapter);
3897		return ret;
3898	}
3899
3900	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
3901	netif_dbg(adapter, drv, adapter->netdev,
3902		  "Wakeup source : 0x%08X\n", ret);
3903
3904	/* Clear the wol configuration and status bits. Note that
3905	 * the status bits are "Write One to Clear (W1C)"
3906	 */
3907	data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
3908	       MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
3909	       MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
3910	lan743x_csr_write(adapter, MAC_WUCSR, data);
3911
3912	data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
3913	       MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
3914	lan743x_csr_write(adapter, MAC_WUCSR2, data);
3915
3916	data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
3917	       MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
3918	       MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
3919	       MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
3920	       MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
3921	       MAC_WK_SRC_WK_FR_SAVED_;
3922	lan743x_csr_write(adapter, MAC_WK_SRC, data);
3923
3924	rtnl_lock();
3925	/* Reopen the netdev only when it was in the running state.
3926	 * This is the case when the system wakes up after pm-suspend,
3927	 * but not when it wakes up after suspending from the GUI menu.
3928	 */
3929	if (netif_running(netdev))
3930		lan743x_netdev_open(netdev);
3931
3932	netif_device_attach(netdev);
3933	rtnl_unlock();
3934
3935	return 0;
3936}
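/* Illustrative sketch, not part of the driver: the wake status registers
 * cleared above are "Write One to Clear" (W1C) - reading returns the latched
 * bits, and writing those bits back acknowledges them without disturbing
 * anything else.  The helper name below is hypothetical.
 */
static void example_ack_w1c_status(struct lan743x_adapter *adapter, int offset)
{
	u32 status = lan743x_csr_read(adapter, offset);

	if (status)
		lan743x_csr_write(adapter, offset, status);
}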
3937
3938static const struct dev_pm_ops lan743x_pm_ops = {
3939	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3940};
3941#endif /* CONFIG_PM_SLEEP */
3942
3943static const struct pci_device_id lan743x_pcidev_tbl[] = {
3944	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3945	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3946	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
3947	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
3948	{ 0, }
3949};
3950
3951MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
3952
3953static struct pci_driver lan743x_pcidev_driver = {
3954	.name     = DRIVER_NAME,
3955	.id_table = lan743x_pcidev_tbl,
3956	.probe    = lan743x_pcidev_probe,
3957	.remove   = lan743x_pcidev_remove,
3958#ifdef CONFIG_PM_SLEEP
3959	.driver.pm = &lan743x_pm_ops,
3960#endif
3961	.shutdown = lan743x_pcidev_shutdown,
3962};
3963
3964module_pci_driver(lan743x_pcidev_driver);
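/* Note: module_pci_driver() above is shorthand for the traditional module
 * init/exit pair; it expands to roughly the following (illustrative names):
 */
static int __init example_lan743x_init(void)
{
	return pci_register_driver(&lan743x_pcidev_driver);
}
module_init(example_lan743x_init);

static void __exit example_lan743x_exit(void)
{
	pci_unregister_driver(&lan743x_pcidev_driver);
}
module_exit(example_lan743x_exit);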
3965
3966MODULE_AUTHOR(DRIVER_AUTHOR);
3967MODULE_DESCRIPTION(DRIVER_DESC);
3968MODULE_LICENSE("GPL");
v6.2
   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/* Copyright (C) 2018 Microchip Technology Inc. */
   3
   4#include <linux/module.h>
   5#include <linux/pci.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/crc32.h>
   9#include <linux/microchipphy.h>
  10#include <linux/net_tstamp.h>
  11#include <linux/of_mdio.h>
  12#include <linux/of_net.h>
  13#include <linux/phy.h>
  14#include <linux/phy_fixed.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/iopoll.h>
  17#include <linux/crc16.h>
 
  18#include "lan743x_main.h"
  19#include "lan743x_ethtool.h"
  20
  21#define MMD_ACCESS_ADDRESS	0
  22#define MMD_ACCESS_WRITE	1
  23#define MMD_ACCESS_READ		2
  24#define MMD_ACCESS_READ_INC	3
  25#define PCS_POWER_STATE_DOWN	0x6
  26#define PCS_POWER_STATE_UP	0x4
  27
 
 
  28static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
  29{
  30	u32 chip_rev;
  31	u32 cfg_load;
  32	u32 hw_cfg;
  33	u32 strap;
  34	int ret;
  35
  36	/* Timeout = 100 (i.e. 1 sec (10 msce * 100)) */
  37	ret = lan743x_hs_syslock_acquire(adapter, 100);
  38	if (ret < 0) {
  39		netif_err(adapter, drv, adapter->netdev,
  40			  "Sys Lock acquire failed ret:%d\n", ret);
  41		return;
  42	}
  43
  44	cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
  45	lan743x_hs_syslock_release(adapter);
  46	hw_cfg = lan743x_csr_read(adapter, HW_CFG);
  47
  48	if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
  49	    hw_cfg & HW_CFG_RST_PROTECT_) {
  50		strap = lan743x_csr_read(adapter, STRAP_READ);
  51		if (strap & STRAP_READ_SGMII_EN_)
  52			adapter->is_sgmii_en = true;
  53		else
  54			adapter->is_sgmii_en = false;
  55	} else {
  56		chip_rev = lan743x_csr_read(adapter, FPGA_REV);
  57		if (chip_rev) {
  58			if (chip_rev & FPGA_SGMII_OP)
  59				adapter->is_sgmii_en = true;
  60			else
  61				adapter->is_sgmii_en = false;
  62		} else {
  63			adapter->is_sgmii_en = false;
  64		}
  65	}
  66	netif_dbg(adapter, drv, adapter->netdev,
  67		  "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
  68}
  69
  70static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
  71{
  72	struct lan743x_csr *csr = &adapter->csr;
  73	u32 id_rev = csr->id_rev;
  74
  75	if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
  76	    ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
  77		return true;
  78	}
  79	return false;
  80}
  81
  82static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  83{
  84	pci_release_selected_regions(adapter->pdev,
  85				     pci_select_bars(adapter->pdev,
  86						     IORESOURCE_MEM));
  87	pci_disable_device(adapter->pdev);
  88}
  89
  90static int lan743x_pci_init(struct lan743x_adapter *adapter,
  91			    struct pci_dev *pdev)
  92{
  93	unsigned long bars = 0;
  94	int ret;
  95
  96	adapter->pdev = pdev;
  97	ret = pci_enable_device_mem(pdev);
  98	if (ret)
  99		goto return_error;
 100
 101	netif_info(adapter, probe, adapter->netdev,
 102		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
 103		   pdev->vendor, pdev->device);
 104	bars = pci_select_bars(pdev, IORESOURCE_MEM);
 105	if (!test_bit(0, &bars))
 106		goto disable_device;
 107
 108	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
 109	if (ret)
 110		goto disable_device;
 111
 112	pci_set_master(pdev);
 113	return 0;
 114
 115disable_device:
 116	pci_disable_device(adapter->pdev);
 117
 118return_error:
 119	return ret;
 120}
 121
 122u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
 123{
 124	return ioread32(&adapter->csr.csr_address[offset]);
 125}
 126
 127void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
 128		       u32 data)
 129{
 130	iowrite32(data, &adapter->csr.csr_address[offset]);
 131}
 132
 133#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
 134
 135static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
 136{
 137	u32 data;
 138
 139	data = lan743x_csr_read(adapter, HW_CFG);
 140	data |= HW_CFG_LRST_;
 141	lan743x_csr_write(adapter, HW_CFG, data);
 142
 143	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
 144				  !(data & HW_CFG_LRST_), 100000, 10000000);
 145}
 146
 
 
 
 
 
 
 
 
 
 
 
 
 147static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
 148				    int offset, u32 bit_mask,
 149				    int target_value, int usleep_min,
 150				    int usleep_max, int count)
 151{
 152	u32 data;
 153
 154	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
 155				  target_value == ((data & bit_mask) ? 1 : 0),
 156				  usleep_max, usleep_min * count);
 157}
 158
 159static int lan743x_csr_init(struct lan743x_adapter *adapter)
 160{
 161	struct lan743x_csr *csr = &adapter->csr;
 162	resource_size_t bar_start, bar_length;
 163	int result;
 164
 165	bar_start = pci_resource_start(adapter->pdev, 0);
 166	bar_length = pci_resource_len(adapter->pdev, 0);
 167	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
 168					bar_start, bar_length);
 169	if (!csr->csr_address) {
 170		result = -ENOMEM;
 171		goto clean_up;
 172	}
 173
 174	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
 175	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
 176	netif_info(adapter, probe, adapter->netdev,
 177		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
 178		   csr->id_rev,	FPGA_REV_GET_MAJOR_(csr->fpga_rev),
 179		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
 180	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
 181		result = -ENODEV;
 182		goto clean_up;
 183	}
 184
 185	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 186	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
 187	case ID_REV_CHIP_REV_A0_:
 188		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
 189		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 190		break;
 191	case ID_REV_CHIP_REV_B0_:
 192		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
 193		break;
 194	}
 195
 196	result = lan743x_csr_light_reset(adapter);
 197	if (result)
 198		goto clean_up;
 199	return 0;
 200clean_up:
 201	return result;
 202}
 203
 204static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
 205{
 206	struct lan743x_intr *intr = &adapter->intr;
 207
 208	/* disable the interrupt to prevent repeated re-triggering */
 209	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 210	intr->software_isr_flag = true;
 211	wake_up(&intr->software_isr_wq);
 212}
 213
 214static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
 215{
 216	struct lan743x_tx *tx = context;
 217	struct lan743x_adapter *adapter = tx->adapter;
 218	bool enable_flag = true;
 219
 220	lan743x_csr_read(adapter, INT_EN_SET);
 221	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 222		lan743x_csr_write(adapter, INT_EN_CLR,
 223				  INT_BIT_DMA_TX_(tx->channel_number));
 224	}
 225
 226	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
 227		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
 228		u32 dmac_int_sts;
 229		u32 dmac_int_en;
 230
 231		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 232			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 233		else
 234			dmac_int_sts = ioc_bit;
 235		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 236			dmac_int_en = lan743x_csr_read(adapter,
 237						       DMAC_INT_EN_SET);
 238		else
 239			dmac_int_en = ioc_bit;
 240
 241		dmac_int_en &= ioc_bit;
 242		dmac_int_sts &= dmac_int_en;
 243		if (dmac_int_sts & ioc_bit) {
 244			napi_schedule(&tx->napi);
 245			enable_flag = false;/* poll func will enable later */
 246		}
 247	}
 248
 249	if (enable_flag)
 250		/* enable isr */
 251		lan743x_csr_write(adapter, INT_EN_SET,
 252				  INT_BIT_DMA_TX_(tx->channel_number));
 253}
 254
 255static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
 256{
 257	struct lan743x_rx *rx = context;
 258	struct lan743x_adapter *adapter = rx->adapter;
 259	bool enable_flag = true;
 260
 261	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 262		lan743x_csr_write(adapter, INT_EN_CLR,
 263				  INT_BIT_DMA_RX_(rx->channel_number));
 264	}
 265
 266	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
 267		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
 268		u32 dmac_int_sts;
 269		u32 dmac_int_en;
 270
 271		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 272			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 273		else
 274			dmac_int_sts = rx_frame_bit;
 275		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 276			dmac_int_en = lan743x_csr_read(adapter,
 277						       DMAC_INT_EN_SET);
 278		else
 279			dmac_int_en = rx_frame_bit;
 280
 281		dmac_int_en &= rx_frame_bit;
 282		dmac_int_sts &= dmac_int_en;
 283		if (dmac_int_sts & rx_frame_bit) {
 284			napi_schedule(&rx->napi);
 285			enable_flag = false;/* poll funct will enable later */
 286		}
 287	}
 288
 289	if (enable_flag) {
 290		/* enable isr */
 291		lan743x_csr_write(adapter, INT_EN_SET,
 292				  INT_BIT_DMA_RX_(rx->channel_number));
 293	}
 294}
 295
 296static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
 297{
 298	struct lan743x_adapter *adapter = context;
 299	unsigned int channel;
 300
 301	if (int_sts & INT_BIT_ALL_RX_) {
 302		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
 303			channel++) {
 304			u32 int_bit = INT_BIT_DMA_RX_(channel);
 305
 306			if (int_sts & int_bit) {
 307				lan743x_rx_isr(&adapter->rx[channel],
 308					       int_bit, flags);
 309				int_sts &= ~int_bit;
 310			}
 311		}
 312	}
 313	if (int_sts & INT_BIT_ALL_TX_) {
 314		for (channel = 0; channel < adapter->used_tx_channels;
 315			channel++) {
 316			u32 int_bit = INT_BIT_DMA_TX_(channel);
 317
 318			if (int_sts & int_bit) {
 319				lan743x_tx_isr(&adapter->tx[channel],
 320					       int_bit, flags);
 321				int_sts &= ~int_bit;
 322			}
 323		}
 324	}
 325	if (int_sts & INT_BIT_ALL_OTHER_) {
 326		if (int_sts & INT_BIT_SW_GP_) {
 327			lan743x_intr_software_isr(adapter);
 328			int_sts &= ~INT_BIT_SW_GP_;
 329		}
 330		if (int_sts & INT_BIT_1588_) {
 331			lan743x_ptp_isr(adapter);
 332			int_sts &= ~INT_BIT_1588_;
 333		}
 334	}
 335	if (int_sts)
 336		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
 337}
 338
 339static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
 340{
 341	struct lan743x_vector *vector = ptr;
 342	struct lan743x_adapter *adapter = vector->adapter;
 343	irqreturn_t result = IRQ_NONE;
 344	u32 int_enables;
 345	u32 int_sts;
 346
 347	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
 348		int_sts = lan743x_csr_read(adapter, INT_STS);
 349	} else if (vector->flags &
 350		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
 351		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
 352		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
 353	} else {
 354		/* use mask as implied status */
 355		int_sts = vector->int_mask | INT_BIT_MAS_;
 356	}
 357
 358	if (!(int_sts & INT_BIT_MAS_))
 359		goto irq_done;
 360
 361	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
 362		/* disable vector interrupt */
 363		lan743x_csr_write(adapter,
 364				  INT_VEC_EN_CLR,
 365				  INT_VEC_EN_(vector->vector_index));
 366
 367	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
 368		/* disable master interrupt */
 369		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 370
 371	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
 372		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
 373	} else {
 374		/*  use vector mask as implied enable mask */
 375		int_enables = vector->int_mask;
 376	}
 377
 378	int_sts &= int_enables;
 379	int_sts &= vector->int_mask;
 380	if (int_sts) {
 381		if (vector->handler) {
 382			vector->handler(vector->context,
 383					int_sts, vector->flags);
 384		} else {
 385			/* disable interrupts on this vector */
 386			lan743x_csr_write(adapter, INT_EN_CLR,
 387					  vector->int_mask);
 388		}
 389		result = IRQ_HANDLED;
 390	}
 391
 392	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
 393		/* enable master interrupt */
 394		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 395
 396	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
 397		/* enable vector interrupt */
 398		lan743x_csr_write(adapter,
 399				  INT_VEC_EN_SET,
 400				  INT_VEC_EN_(vector->vector_index));
 401irq_done:
 402	return result;
 403}
 404
 405static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
 406{
 407	struct lan743x_intr *intr = &adapter->intr;
 408	int ret;
 409
 410	intr->software_isr_flag = false;
 411
 412	/* enable and activate test interrupt */
 413	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
 414	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
 415
 416	ret = wait_event_timeout(intr->software_isr_wq,
 417				 intr->software_isr_flag,
 418				 msecs_to_jiffies(200));
 419
 420	/* disable test interrupt */
 421	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 422
 423	return ret > 0 ? 0 : -ENODEV;
 424}
 425
 426static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
 427				     int vector_index, u32 flags,
 428				     u32 int_mask,
 429				     lan743x_vector_handler handler,
 430				     void *context)
 431{
 432	struct lan743x_vector *vector = &adapter->intr.vector_list
 433					[vector_index];
 434	int ret;
 435
 436	vector->adapter = adapter;
 437	vector->flags = flags;
 438	vector->vector_index = vector_index;
 439	vector->int_mask = int_mask;
 440	vector->handler = handler;
 441	vector->context = context;
 442
 443	ret = request_irq(vector->irq,
 444			  lan743x_intr_entry_isr,
 445			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
 446			  IRQF_SHARED : 0, DRIVER_NAME, vector);
 447	if (ret) {
 448		vector->handler = NULL;
 449		vector->context = NULL;
 450		vector->int_mask = 0;
 451		vector->flags = 0;
 452	}
 453	return ret;
 454}
 455
 456static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
 457					int vector_index)
 458{
 459	struct lan743x_vector *vector = &adapter->intr.vector_list
 460					[vector_index];
 461
 462	free_irq(vector->irq, vector);
 463	vector->handler = NULL;
 464	vector->context = NULL;
 465	vector->int_mask = 0;
 466	vector->flags = 0;
 467}
 468
 469static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
 470					 u32 int_mask)
 471{
 472	int index;
 473
 474	for (index = 0; index < adapter->max_vector_count; index++) {
 475		if (adapter->intr.vector_list[index].int_mask & int_mask)
 476			return adapter->intr.vector_list[index].flags;
 477	}
 478	return 0;
 479}
 480
 481static void lan743x_intr_close(struct lan743x_adapter *adapter)
 482{
 483	struct lan743x_intr *intr = &adapter->intr;
 484	int index = 0;
 485
 486	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 487	if (adapter->is_pci11x1x)
 488		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
 489	else
 490		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
 491
 492	for (index = 0; index < intr->number_of_vectors; index++) {
 493		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
 494			lan743x_intr_unregister_isr(adapter, index);
 495			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
 496		}
 497	}
 498
 499	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
 500		pci_disable_msi(adapter->pdev);
 501		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
 502	}
 503
 504	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
 505		pci_disable_msix(adapter->pdev);
 506		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
 507	}
 508}
 509
 510static int lan743x_intr_open(struct lan743x_adapter *adapter)
 511{
 512	struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
 513	struct lan743x_intr *intr = &adapter->intr;
 514	unsigned int used_tx_channels;
 515	u32 int_vec_en_auto_clr = 0;
 516	u8 max_vector_count;
 517	u32 int_vec_map0 = 0;
 518	u32 int_vec_map1 = 0;
 519	int ret = -ENODEV;
 520	int index = 0;
 521	u32 flags = 0;
 522
 523	intr->number_of_vectors = 0;
 524
 525	/* Try to set up MSIX interrupts */
 526	max_vector_count = adapter->max_vector_count;
 527	memset(&msix_entries[0], 0,
 528	       sizeof(struct msix_entry) * max_vector_count);
 529	for (index = 0; index < max_vector_count; index++)
 530		msix_entries[index].entry = index;
 531	used_tx_channels = adapter->used_tx_channels;
 532	ret = pci_enable_msix_range(adapter->pdev,
 533				    msix_entries, 1,
 534				    1 + used_tx_channels +
 535				    LAN743X_USED_RX_CHANNELS);
 536
 537	if (ret > 0) {
 538		intr->flags |= INTR_FLAG_MSIX_ENABLED;
 539		intr->number_of_vectors = ret;
 540		intr->using_vectors = true;
 541		for (index = 0; index < intr->number_of_vectors; index++)
 542			intr->vector_list[index].irq = msix_entries
 543						       [index].vector;
 544		netif_info(adapter, ifup, adapter->netdev,
 545			   "using MSIX interrupts, number of vectors = %d\n",
 546			   intr->number_of_vectors);
 547	}
 548
 549	/* If MSIX failed try to setup using MSI interrupts */
 550	if (!intr->number_of_vectors) {
 551		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 552			if (!pci_enable_msi(adapter->pdev)) {
 553				intr->flags |= INTR_FLAG_MSI_ENABLED;
 554				intr->number_of_vectors = 1;
 555				intr->using_vectors = true;
 556				intr->vector_list[0].irq =
 557					adapter->pdev->irq;
 558				netif_info(adapter, ifup, adapter->netdev,
 559					   "using MSI interrupts, number of vectors = %d\n",
 560					   intr->number_of_vectors);
 561			}
 562		}
 563	}
 564
 565	/* If MSIX, and MSI failed, setup using legacy interrupt */
 566	if (!intr->number_of_vectors) {
 567		intr->number_of_vectors = 1;
 568		intr->using_vectors = false;
 569		intr->vector_list[0].irq = intr->irq;
 570		netif_info(adapter, ifup, adapter->netdev,
 571			   "using legacy interrupts\n");
 572	}
 573
 574	/* At this point we must have at least one irq */
 575	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
 576
 577	/* map all interrupts to vector 0 */
 578	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
 579	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
 580	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
 581	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 582		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 583		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 584		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 585
 586	if (intr->using_vectors) {
 587		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 588			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 589	} else {
 590		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
 591			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
 592			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
 593	}
 594
 595	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 596		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
 597		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
 598		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 599		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
 600		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
 601		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
 602	}
 603
 604	init_waitqueue_head(&intr->software_isr_wq);
 605
 606	ret = lan743x_intr_register_isr(adapter, 0, flags,
 607					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
 608					INT_BIT_ALL_OTHER_,
 609					lan743x_intr_shared_isr, adapter);
 610	if (ret)
 611		goto clean_up;
 612	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
 613
 614	if (intr->using_vectors)
 615		lan743x_csr_write(adapter, INT_VEC_EN_SET,
 616				  INT_VEC_EN_(0));
 617
 618	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 619		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
 620		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
 621		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
 622		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
 623		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
 624		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
 625		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
 626		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
 627		if (adapter->is_pci11x1x) {
 628			lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
 629			lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
 630			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
 631			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
 632		} else {
 633			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
 634			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
 635		}
 636		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
 637	}
 638
 639	/* enable interrupts */
 640	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 641	ret = lan743x_intr_test_isr(adapter);
 642	if (ret)
 643		goto clean_up;
 644
 645	if (intr->number_of_vectors > 1) {
 646		int number_of_tx_vectors = intr->number_of_vectors - 1;
 647
 648		if (number_of_tx_vectors > used_tx_channels)
 649			number_of_tx_vectors = used_tx_channels;
 650		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 651			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 652			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 653			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 654			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 655			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 656
 657		if (adapter->csr.flags &
 658		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 659			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 660				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 661				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 662				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 663		}
 664
 665		for (index = 0; index < number_of_tx_vectors; index++) {
 666			u32 int_bit = INT_BIT_DMA_TX_(index);
 667			int vector = index + 1;
 668
 669			/* map TX interrupt to vector */
 670			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 671			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
 672
 673			/* Remove TX interrupt from shared mask */
 674			intr->vector_list[0].int_mask &= ~int_bit;
 675			ret = lan743x_intr_register_isr(adapter, vector, flags,
 676							int_bit, lan743x_tx_isr,
 677							&adapter->tx[index]);
 678			if (ret)
 679				goto clean_up;
 680			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 681			if (!(flags &
 682			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
 683				lan743x_csr_write(adapter, INT_VEC_EN_SET,
 684						  INT_VEC_EN_(vector));
 685		}
 686	}
 687	if ((intr->number_of_vectors - used_tx_channels) > 1) {
 688		int number_of_rx_vectors = intr->number_of_vectors -
 689						used_tx_channels - 1;
 690
 691		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
 692			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
 693
 694		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 695			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 696			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 697			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 698			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 699			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 700
 701		if (adapter->csr.flags &
 702		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 703			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
 704				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 705				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 706				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 707				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 708		}
 709		for (index = 0; index < number_of_rx_vectors; index++) {
 710			int vector = index + 1 + used_tx_channels;
 711			u32 int_bit = INT_BIT_DMA_RX_(index);
 712
 713			/* map RX interrupt to vector */
 714			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
 715			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
 716			if (flags &
 717			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
 718				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
 719				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
 720						  int_vec_en_auto_clr);
 721			}
 722
 723			/* Remove RX interrupt from shared mask */
 724			intr->vector_list[0].int_mask &= ~int_bit;
 725			ret = lan743x_intr_register_isr(adapter, vector, flags,
 726							int_bit, lan743x_rx_isr,
 727							&adapter->rx[index]);
 728			if (ret)
 729				goto clean_up;
 730			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 731
 732			lan743x_csr_write(adapter, INT_VEC_EN_SET,
 733					  INT_VEC_EN_(vector));
 734		}
 735	}
 736	return 0;
 737
 738clean_up:
 739	lan743x_intr_close(adapter);
 740	return ret;
 741}
 742
 743static int lan743x_dp_write(struct lan743x_adapter *adapter,
 744			    u32 select, u32 addr, u32 length, u32 *buf)
 745{
 746	u32 dp_sel;
 747	int i;
 748
 749	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
 750				     1, 40, 100, 100))
 751		return -EIO;
 752	dp_sel = lan743x_csr_read(adapter, DP_SEL);
 753	dp_sel &= ~DP_SEL_MASK_;
 754	dp_sel |= select;
 755	lan743x_csr_write(adapter, DP_SEL, dp_sel);
 756
 757	for (i = 0; i < length; i++) {
 758		lan743x_csr_write(adapter, DP_ADDR, addr + i);
 759		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
 760		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
 761		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
 762					     1, 40, 100, 100))
 
 763			return -EIO;
 764	}
 765
 766	return 0;
 767}
 768
 769static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
 770{
 771	u32 ret;
 772
 773	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 774		MAC_MII_ACC_PHY_ADDR_MASK_;
 775	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
 776		MAC_MII_ACC_MIIRINDA_MASK_;
 777
 778	if (read)
 779		ret |= MAC_MII_ACC_MII_READ_;
 780	else
 781		ret |= MAC_MII_ACC_MII_WRITE_;
 782	ret |= MAC_MII_ACC_MII_BUSY_;
 783
 784	return ret;
 785}
 786
 787static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
 788{
 789	u32 data;
 790
 791	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
 792				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
 793}
 794
 795static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
 796{
 797	struct lan743x_adapter *adapter = bus->priv;
 798	u32 val, mii_access;
 799	int ret;
 800
 801	/* comfirm MII not busy */
 802	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 803	if (ret < 0)
 804		return ret;
 805
 806	/* set the address, index & direction (read from PHY) */
 807	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
 808	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 809	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 810	if (ret < 0)
 811		return ret;
 812
 813	val = lan743x_csr_read(adapter, MAC_MII_DATA);
 814	return (int)(val & 0xFFFF);
 815}
 816
 817static int lan743x_mdiobus_write(struct mii_bus *bus,
 818				 int phy_id, int index, u16 regval)
 819{
 820	struct lan743x_adapter *adapter = bus->priv;
 821	u32 val, mii_access;
 822	int ret;
 823
 824	/* confirm MII not busy */
 825	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 826	if (ret < 0)
 827		return ret;
 828	val = (u32)regval;
 829	lan743x_csr_write(adapter, MAC_MII_DATA, val);
 830
 831	/* set the address, index & direction (write to PHY) */
 832	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
 833	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 834	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 835	return ret;
 836}
 837
 838static u32 lan743x_mac_mmd_access(int id, int index, int op)
 839{
 840	u16 dev_addr;
 841	u32 ret;
 842
 843	dev_addr = (index >> 16) & 0x1f;
 844	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 845		MAC_MII_ACC_PHY_ADDR_MASK_;
 846	ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
 847		MAC_MII_ACC_MIIMMD_MASK_;
 848	if (op == MMD_ACCESS_WRITE)
 849		ret |= MAC_MII_ACC_MIICMD_WRITE_;
 850	else if (op == MMD_ACCESS_READ)
 851		ret |= MAC_MII_ACC_MIICMD_READ_;
 852	else if (op == MMD_ACCESS_READ_INC)
 853		ret |= MAC_MII_ACC_MIICMD_READ_INC_;
 854	else
 855		ret |= MAC_MII_ACC_MIICMD_ADDR_;
 856	ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
 857
 858	return ret;
 859}
 860
 861static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
 
 862{
 863	struct lan743x_adapter *adapter = bus->priv;
 864	u32 mmd_access;
 865	int ret;
 866
 867	/* comfirm MII not busy */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 868	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 869	if (ret < 0)
 870		return ret;
 871	if (index & MII_ADDR_C45) {
 872		/* Load Register Address */
 873		lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
 874		mmd_access = lan743x_mac_mmd_access(phy_id, index,
 875						    MMD_ACCESS_ADDRESS);
 876		lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 877		ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 878		if (ret < 0)
 879			return ret;
 880		/* Read Data */
 881		mmd_access = lan743x_mac_mmd_access(phy_id, index,
 882						    MMD_ACCESS_READ);
 883		lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 884		ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 885		if (ret < 0)
 886			return ret;
 887		ret = lan743x_csr_read(adapter, MAC_MII_DATA);
 888		return (int)(ret & 0xFFFF);
 889	}
 890
 891	ret = lan743x_mdiobus_read(bus, phy_id, index);
 892	return ret;
 893}
 894
 895static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
 896				     int phy_id, int index, u16 regval)
 897{
 898	struct lan743x_adapter *adapter = bus->priv;
 899	u32 mmd_access;
 900	int ret;
 901
 902	/* confirm MII not busy */
 903	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 904	if (ret < 0)
 905		return ret;
 906	if (index & MII_ADDR_C45) {
 907		/* Load Register Address */
 908		lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
 909		mmd_access = lan743x_mac_mmd_access(phy_id, index,
 910						    MMD_ACCESS_ADDRESS);
 911		lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 912		ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 913		if (ret < 0)
 914			return ret;
 915		/* Write Data */
 916		lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
 917		mmd_access = lan743x_mac_mmd_access(phy_id, index,
 918						    MMD_ACCESS_WRITE);
 919		lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 920		ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 921	} else {
 922		ret = lan743x_mdiobus_write(bus, phy_id, index, regval);
 923	}
 924
 925	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 926}
 927
 928static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
 929{
 930	u32 data;
 931	int ret;
 932
 933	ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
 934				 !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
 935	if (ret < 0)
 936		netif_err(adapter, drv, adapter->netdev,
 937			  "%s: error %d sgmii wait timeout\n", __func__, ret);
 938
 939	return ret;
 940}
 941
 942int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
 943{
 944	u32 mmd_access;
 945	int ret;
 946	u32 val;
 947
 948	if (mmd > 31) {
 949		netif_err(adapter, probe, adapter->netdev,
 950			  "%s mmd should <= 31\n", __func__);
 951		return -EINVAL;
 952	}
 953
 954	mutex_lock(&adapter->sgmii_rw_lock);
 955	/* Load Register Address */
 956	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
 957	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
 958	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
 959	ret = lan743x_sgmii_wait_till_not_busy(adapter);
 960	if (ret < 0)
 961		goto sgmii_unlock;
 962
 963	val = lan743x_csr_read(adapter, SGMII_DATA);
 964	ret = (int)(val & SGMII_DATA_MASK_);
 965
 966sgmii_unlock:
 967	mutex_unlock(&adapter->sgmii_rw_lock);
 968
 969	return ret;
 970}
 971
 972static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
 973			       u8 mmd, u16 addr, u16 val)
 974{
 975	u32 mmd_access;
 976	int ret;
 977
 978	if (mmd > 31) {
 979		netif_err(adapter, probe, adapter->netdev,
 980			  "%s mmd should <= 31\n", __func__);
 981		return -EINVAL;
 982	}
 983	mutex_lock(&adapter->sgmii_rw_lock);
 984	/* Load Register Data */
 985	lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
 986	/* Load Register Address */
 987	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
 988	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
 989	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
 990	ret = lan743x_sgmii_wait_till_not_busy(adapter);
 991	mutex_unlock(&adapter->sgmii_rw_lock);
 992
 993	return ret;
 994}
 995
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 996static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
 997				  u16 baud)
 998{
 999	int mpllctrl0;
1000	int mpllctrl1;
1001	int miscctrl1;
1002	int ret;
1003
1004	mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1005				       VR_MII_GEN2_4_MPLL_CTRL0);
1006	if (mpllctrl0 < 0)
1007		return mpllctrl0;
1008
1009	mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
1010	if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
1011		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
1012		/* mpll_baud_clk/4 */
1013		miscctrl1 = 0xA;
1014	} else {
1015		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
1016		/* mpll_baud_clk/2 */
1017		miscctrl1 = 0x5;
1018	}
1019
1020	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1021				  VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
1022	if (ret < 0)
1023		return ret;
1024
1025	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1026				  VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
1027	if (ret < 0)
1028		return ret;
1029
1030	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1031				  VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
1032}
1033
1034static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
1035				       bool enable)
1036{
1037	if (enable)
1038		return lan743x_sgmii_mpll_set(adapter,
1039					      VR_MII_BAUD_RATE_3P125GBPS);
1040	else
1041		return lan743x_sgmii_mpll_set(adapter,
1042					      VR_MII_BAUD_RATE_1P25GBPS);
1043}
1044
1045static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
1046				      bool *status)
1047{
1048	int ret;
1049
1050	ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1051				 VR_MII_GEN2_4_MPLL_CTRL1);
1052	if (ret < 0)
1053		return ret;
1054
1055	if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
1056	    ret == VR_MII_MPLL_MULTIPLIER_50)
1057		*status = true;
1058	else
1059		*status = false;
1060
1061	return 0;
1062}
1063
1064static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
1065{
1066	enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
1067	int mii_ctrl;
1068	int dgt_ctrl;
1069	int an_ctrl;
1070	int ret;
1071
1072	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
1073		/* Switch to 2.5 Gbps */
1074		ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
1075	else
1076		/* Switch to 10/100/1000 Mbps clock */
1077		ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
1078	if (ret < 0)
1079		return ret;
1080
1081	/* Enable SGMII Auto NEG */
1082	mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
1083	if (mii_ctrl < 0)
1084		return mii_ctrl;
1085
1086	an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
1087	if (an_ctrl < 0)
1088		return an_ctrl;
1089
1090	dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1091				      VR_MII_DIG_CTRL1);
1092	if (dgt_ctrl < 0)
1093		return dgt_ctrl;
1094
1095	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
1096		mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
1097		mii_ctrl |= BMCR_SPEED1000;
1098		dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
1099		dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
1100		/* In order for Auto-Negotiation to operate properly at
1101		 * 2.5 Gbps the 1.6ms link timer values must be adjusted
1102		 * The VR_MII_LINK_TIMER_CTRL Register must be set to
1103		 * 16'h7A1 and The CL37_TMR_OVR_RIDE bit of the
1104		 * VR_MII_DIG_CTRL1 Register set to 1
1105		 */
1106		ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1107					  VR_MII_LINK_TIMER_CTRL, 0x7A1);
1108		if (ret < 0)
1109			return ret;
1110	} else {
1111		mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1112		an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
1113		dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
1114		dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
1115	}
1116
1117	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
1118				  mii_ctrl);
1119	if (ret < 0)
1120		return ret;
1121
1122	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1123				  VR_MII_DIG_CTRL1, dgt_ctrl);
1124	if (ret < 0)
1125		return ret;
1126
1127	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1128				  VR_MII_AN_CTRL, an_ctrl);
1129}
1130
1131static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
1132{
1133	u8 wait_cnt = 0;
1134	u32 dig_sts;
1135
1136	do {
1137		dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1138					     VR_MII_DIG_STS);
1139		if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
1140		      VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
1141			break;
1142		usleep_range(1000, 2000);
1143	} while (wait_cnt++ < 10);
1144
1145	if (wait_cnt >= 10)
1146		return -ETIMEDOUT;
1147
1148	return 0;
1149}
1150
1151static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
1152{
1153	struct net_device *netdev = adapter->netdev;
1154	struct phy_device *phydev = netdev->phydev;
1155	enum lan743x_sgmii_lsd lsd = POWER_DOWN;
1156	int mii_ctl;
1157	bool status;
1158	int ret;
1159
1160	switch (phydev->speed) {
1161	case SPEED_2500:
1162		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
1163			lsd = LINK_2500_MASTER;
1164		else
1165			lsd = LINK_2500_SLAVE;
1166		break;
1167	case SPEED_1000:
1168		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
1169			lsd = LINK_1000_MASTER;
1170		else
1171			lsd = LINK_1000_SLAVE;
1172		break;
1173	case SPEED_100:
1174		if (phydev->duplex)
1175			lsd = LINK_100FD;
1176		else
1177			lsd = LINK_100HD;
1178		break;
1179	case SPEED_10:
1180		if (phydev->duplex)
1181			lsd = LINK_10FD;
1182		else
1183			lsd = LINK_10HD;
1184		break;
1185	default:
1186		netif_err(adapter, drv, adapter->netdev,
1187			  "Invalid speed %d\n", phydev->speed);
1188		return -EINVAL;
1189	}
1190
1191	adapter->sgmii_lsd = lsd;
1192	ret = lan743x_sgmii_aneg_update(adapter);
1193	if (ret < 0) {
1194		netif_err(adapter, drv, adapter->netdev,
1195			  "error %d SGMII cfg failed\n", ret);
1196		return ret;
1197	}
1198
1199	ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
1200	if (ret < 0) {
1201		netif_err(adapter, drv, adapter->netdev,
1202			  "erro %d SGMII get mode failed\n", ret);
1203		return ret;
1204	}
1205
1206	if (status)
1207		netif_dbg(adapter, drv, adapter->netdev,
1208			  "SGMII 2.5G mode enable\n");
1209	else
1210		netif_dbg(adapter, drv, adapter->netdev,
1211			  "SGMII 1G mode enable\n");
1212
1213	/* SGMII/1000/2500BASE-X PCS power down */
1214	mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
1215	if (mii_ctl < 0)
1216		return mii_ctl;
1217
1218	mii_ctl |= BMCR_PDOWN;
1219	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
1220	if (ret < 0)
1221		return ret;
1222
1223	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
1224	if (ret < 0)
1225		return ret;
1226
1227	/* SGMII/1000/2500BASE-X PCS power up */
1228	mii_ctl &= ~BMCR_PDOWN;
1229	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
1230	if (ret < 0)
1231		return ret;
1232
1233	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
1234	if (ret < 0)
1235		return ret;
1236
1237	return 0;
1238}
1239
1240static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
1241				    u8 *addr)
1242{
1243	u32 addr_lo, addr_hi;
1244
1245	addr_lo = addr[0] |
1246		addr[1] << 8 |
1247		addr[2] << 16 |
1248		addr[3] << 24;
1249	addr_hi = addr[4] |
1250		addr[5] << 8;
1251	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
1252	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
1253
1254	ether_addr_copy(adapter->mac_address, addr);
1255	netif_info(adapter, drv, adapter->netdev,
1256		   "MAC address set to %pM\n", addr);
1257}
1258
1259static int lan743x_mac_init(struct lan743x_adapter *adapter)
1260{
1261	bool mac_address_valid = true;
1262	struct net_device *netdev;
1263	u32 mac_addr_hi = 0;
1264	u32 mac_addr_lo = 0;
1265	u32 data;
1266
1267	netdev = adapter->netdev;
1268
1269	/* disable auto duplex, and speed detection. Phylib does that */
1270	data = lan743x_csr_read(adapter, MAC_CR);
1271	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
1272	data |= MAC_CR_CNTR_RST_;
1273	lan743x_csr_write(adapter, MAC_CR, data);
1274
1275	if (!is_valid_ether_addr(adapter->mac_address)) {
1276		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
1277		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
1278		adapter->mac_address[0] = mac_addr_lo & 0xFF;
1279		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
1280		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
1281		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
1282		adapter->mac_address[4] = mac_addr_hi & 0xFF;
1283		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
1284
1285		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
1286		    mac_addr_lo == 0xFFFFFFFF) {
1287			mac_address_valid = false;
1288		} else if (!is_valid_ether_addr(adapter->mac_address)) {
1289			mac_address_valid = false;
1290		}
1291
1292		if (!mac_address_valid)
1293			eth_random_addr(adapter->mac_address);
1294	}
1295	lan743x_mac_set_address(adapter, adapter->mac_address);
1296	eth_hw_addr_set(netdev, adapter->mac_address);
1297
1298	return 0;
1299}
1300
1301static int lan743x_mac_open(struct lan743x_adapter *adapter)
1302{
1303	u32 temp;
1304
1305	temp = lan743x_csr_read(adapter, MAC_RX);
1306	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
1307	temp = lan743x_csr_read(adapter, MAC_TX);
1308	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
1309	return 0;
1310}
1311
1312static void lan743x_mac_close(struct lan743x_adapter *adapter)
1313{
1314	u32 temp;
1315
1316	temp = lan743x_csr_read(adapter, MAC_TX);
1317	temp &= ~MAC_TX_TXEN_;
1318	lan743x_csr_write(adapter, MAC_TX, temp);
1319	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
1320				 1, 1000, 20000, 100);
1321
1322	temp = lan743x_csr_read(adapter, MAC_RX);
1323	temp &= ~MAC_RX_RXEN_;
1324	lan743x_csr_write(adapter, MAC_RX, temp);
1325	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
1326				 1, 1000, 20000, 100);
1327}
1328
1329void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
1330				       bool tx_enable, bool rx_enable)
1331{
1332	u32 flow_setting = 0;
1333
1334	/* set maximum pause time because when fifo space frees
1335	 * up, a zero-value pause frame will be sent to release the pause
1336	 */
1337	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
1338	if (tx_enable)
1339		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
1340	if (rx_enable)
1341		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
1342	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
1343}
1344
1345static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
1346{
1347	int enabled = 0;
1348	u32 mac_rx = 0;
1349
1350	mac_rx = lan743x_csr_read(adapter, MAC_RX);
1351	if (mac_rx & MAC_RX_RXEN_) {
1352		enabled = 1;
1353		if (mac_rx & MAC_RX_RXD_) {
1354			lan743x_csr_write(adapter, MAC_RX, mac_rx);
1355			mac_rx &= ~MAC_RX_RXD_;
1356		}
1357		mac_rx &= ~MAC_RX_RXEN_;
1358		lan743x_csr_write(adapter, MAC_RX, mac_rx);
1359		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
1360					 1, 1000, 20000, 100);
1361		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
1362	}
1363
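	/* program the new max frame size; e.g. an MTU of 1500 gives
	 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes
	 */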
1364	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
1365	mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
1366		  << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1367	lan743x_csr_write(adapter, MAC_RX, mac_rx);
1368
1369	if (enabled) {
1370		mac_rx |= MAC_RX_RXEN_;
1371		lan743x_csr_write(adapter, MAC_RX, mac_rx);
1372	}
1373	return 0;
1374}
1375
1376/* PHY */
1377static int lan743x_phy_reset(struct lan743x_adapter *adapter)
1378{
1379	u32 data;
1380
1381	/* Only called within probe, and before mdiobus_register */
1382
1383	data = lan743x_csr_read(adapter, PMT_CTL);
1384	data |= PMT_CTL_ETH_PHY_RST_;
1385	lan743x_csr_write(adapter, PMT_CTL, data);
1386
1387	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
1388				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
1389				  (data & PMT_CTL_READY_)),
1390				  50000, 1000000);
1391}
1392
1393static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
1394					   u16 local_adv, u16 remote_adv)
1395{
1396	struct lan743x_phy *phy = &adapter->phy;
1397	u8 cap;
1398
1399	if (phy->fc_autoneg)
1400		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
1401	else
1402		cap = phy->fc_request_control;
1403
1404	lan743x_mac_flow_ctrl_set_enables(adapter,
1405					  cap & FLOW_CTRL_TX,
1406					  cap & FLOW_CTRL_RX);
1407}
1408
1409static int lan743x_phy_init(struct lan743x_adapter *adapter)
1410{
1411	return lan743x_phy_reset(adapter);
1412}
1413
1414static void lan743x_phy_link_status_change(struct net_device *netdev)
1415{
1416	struct lan743x_adapter *adapter = netdev_priv(netdev);
1417	struct phy_device *phydev = netdev->phydev;
1418	u32 data;
1419
1420	phy_print_status(phydev);
1421	if (phydev->state == PHY_RUNNING) {
1422		int remote_advertisement = 0;
1423		int local_advertisement = 0;
1424
1425		data = lan743x_csr_read(adapter, MAC_CR);
1426
1427		/* set interface mode */
1428		if (phy_interface_is_rgmii(phydev))
1429			/* RGMII */
1430			data &= ~MAC_CR_MII_EN_;
1431		else
1432			/* GMII */
1433			data |= MAC_CR_MII_EN_;
1434
1435		/* set duplex mode */
1436		if (phydev->duplex)
1437			data |= MAC_CR_DPX_;
1438		else
1439			data &= ~MAC_CR_DPX_;
1440
1441		/* set bus speed */
1442		switch (phydev->speed) {
1443		case SPEED_10:
1444			data &= ~MAC_CR_CFG_H_;
1445			data &= ~MAC_CR_CFG_L_;
1446		break;
1447		case SPEED_100:
1448			data &= ~MAC_CR_CFG_H_;
1449			data |= MAC_CR_CFG_L_;
1450		break;
1451		case SPEED_1000:
1452			data |= MAC_CR_CFG_H_;
1453			data &= ~MAC_CR_CFG_L_;
1454		break;
1455		case SPEED_2500:
1456			data |= MAC_CR_CFG_H_;
1457			data |= MAC_CR_CFG_L_;
1458		break;
1459		}
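		/* MAC_CR_CFG_H_/MAC_CR_CFG_L_ together act as a 2-bit
		 * speed select: 00 = 10M, 01 = 100M, 10 = 1000M,
		 * 11 = 2500M (per the cases above)
		 */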
1460		lan743x_csr_write(adapter, MAC_CR, data);
1461
1462		local_advertisement =
1463			linkmode_adv_to_mii_adv_t(phydev->advertising);
1464		remote_advertisement =
1465			linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
1466
1467		lan743x_phy_update_flowcontrol(adapter, local_advertisement,
1468					       remote_advertisement);
1469		lan743x_ptp_update_latency(adapter, phydev->speed);
1470		if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
1471		    phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
1472		    phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
1473			lan743x_sgmii_config(adapter);
1474	}
1475}
1476
1477static void lan743x_phy_close(struct lan743x_adapter *adapter)
1478{
1479	struct net_device *netdev = adapter->netdev;
1480
1481	phy_stop(netdev->phydev);
1482	phy_disconnect(netdev->phydev);
1483	netdev->phydev = NULL;
1484}
1485
1486static int lan743x_phy_open(struct lan743x_adapter *adapter)
1487{
1488	struct net_device *netdev = adapter->netdev;
1489	struct lan743x_phy *phy = &adapter->phy;
1490	struct phy_device *phydev;
1491	int ret = -EIO;
1492
1493	/* try devicetree phy, or fixed link */
1494	phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
1495					lan743x_phy_link_status_change);
1496
1497	if (!phydev) {
1498		/* try internal phy */
1499		phydev = phy_find_first(adapter->mdiobus);
1500		if (!phydev)
1501			goto return_error;
1502
1503		if (adapter->is_pci11x1x)
1504			ret = phy_connect_direct(netdev, phydev,
1505						 lan743x_phy_link_status_change,
1506						 PHY_INTERFACE_MODE_RGMII);
1507		else
1508			ret = phy_connect_direct(netdev, phydev,
1509						 lan743x_phy_link_status_change,
1510						 PHY_INTERFACE_MODE_GMII);
1511		if (ret)
1512			goto return_error;
1513	}
1514
1515	/* MAC doesn't support 1000T Half */
1516	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1517
1518	/* support both flow controls */
1519	phy_support_asym_pause(phydev);
1520	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1521	phy->fc_autoneg = phydev->autoneg;
1522
1523	phy_start(phydev);
1524	phy_start_aneg(phydev);
1525	phy_attached_info(phydev);
1526	return 0;
1527
1528return_error:
1529	return ret;
1530}
1531
1532static void lan743x_rfe_open(struct lan743x_adapter *adapter)
1533{
1534	lan743x_csr_write(adapter, RFE_RSS_CFG,
1535		RFE_RSS_CFG_UDP_IPV6_EX_ |
1536		RFE_RSS_CFG_TCP_IPV6_EX_ |
1537		RFE_RSS_CFG_IPV6_EX_ |
1538		RFE_RSS_CFG_UDP_IPV6_ |
1539		RFE_RSS_CFG_TCP_IPV6_ |
1540		RFE_RSS_CFG_IPV6_ |
1541		RFE_RSS_CFG_UDP_IPV4_ |
1542		RFE_RSS_CFG_TCP_IPV4_ |
1543		RFE_RSS_CFG_IPV4_ |
1544		RFE_RSS_CFG_VALID_HASH_BITS_ |
1545		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
1546		RFE_RSS_CFG_RSS_HASH_STORE_ |
1547		RFE_RSS_CFG_RSS_ENABLE_);
1548}
1549
1550static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1551{
1552	u8 *mac_addr;
1553	u32 mac_addr_hi = 0;
1554	u32 mac_addr_lo = 0;
1555
1556	/* Add MAC address to the perfect filter */
1557	mac_addr = adapter->mac_address;
1558	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1559		      (((u32)(mac_addr[1])) << 8) |
1560		      (((u32)(mac_addr[2])) << 16) |
1561		      (((u32)(mac_addr[3])) << 24));
1562	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1563		      (((u32)(mac_addr[5])) << 8));
1564
1565	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1566	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1567			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1568}
1569
1570static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
1571{
1572	struct net_device *netdev = adapter->netdev;
1573	u32 hash_table[DP_SEL_VHF_HASH_LEN];
1574	u32 rfctl;
1575	u32 data;
1576
1577	rfctl = lan743x_csr_read(adapter, RFE_CTL);
1578	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
1579		 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1580	rfctl |= RFE_CTL_AB_;
1581	if (netdev->flags & IFF_PROMISC) {
1582		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
1583	} else {
1584		if (netdev->flags & IFF_ALLMULTI)
1585			rfctl |= RFE_CTL_AM_;
1586	}
1587
1588	if (netdev->features & NETIF_F_RXCSUM)
1589		rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;
1590
1591	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
1592	if (netdev_mc_count(netdev)) {
1593		struct netdev_hw_addr *ha;
1594		int i;
1595
1596		rfctl |= RFE_CTL_DA_PERFECT_;
1597		i = 1;
1598		netdev_for_each_mc_addr(ha, netdev) {
1599			/* set the first 32 addresses into the perfect filter */
1600			if (i < 33) {
1601				lan743x_csr_write(adapter,
1602						  RFE_ADDR_FILT_HI(i), 0);
1603				data = ha->addr[3];
1604				data = ha->addr[2] | (data << 8);
1605				data = ha->addr[1] | (data << 8);
1606				data = ha->addr[0] | (data << 8);
1607				lan743x_csr_write(adapter,
1608						  RFE_ADDR_FILT_LO(i), data);
1609				data = ha->addr[5];
1610				data = ha->addr[4] | (data << 8);
1611				data |= RFE_ADDR_FILT_HI_VALID_;
1612				lan743x_csr_write(adapter,
1613						  RFE_ADDR_FILT_HI(i), data);
1614			} else {
1615				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
1616					     23) & 0x1FF;
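				/* bits 31:23 of the CRC pick one of the
				 * 512 multicast hash-table bits
				 */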
1617				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
1618				rfctl |= RFE_CTL_MCAST_HASH_;
1619			}
1620			i++;
1621		}
1622	}
1623
1624	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
1625			 DP_SEL_VHF_VLAN_LEN,
1626			 DP_SEL_VHF_HASH_LEN, hash_table);
1627	lan743x_csr_write(adapter, RFE_CTL, rfctl);
1628}
1629
1630static int lan743x_dmac_init(struct lan743x_adapter *adapter)
1631{
1632	u32 data = 0;
1633
1634	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
1635	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
1636				 0, 1000, 20000, 100);
1637	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
1638	case DMA_DESCRIPTOR_SPACING_16:
1639		data = DMAC_CFG_MAX_DSPACE_16_;
1640		break;
1641	case DMA_DESCRIPTOR_SPACING_32:
1642		data = DMAC_CFG_MAX_DSPACE_32_;
1643		break;
1644	case DMA_DESCRIPTOR_SPACING_64:
1645		data = DMAC_CFG_MAX_DSPACE_64_;
1646		break;
1647	case DMA_DESCRIPTOR_SPACING_128:
1648		data = DMAC_CFG_MAX_DSPACE_128_;
1649		break;
1650	default:
1651		return -EPERM;
1652	}
1653	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1654		data |= DMAC_CFG_COAL_EN_;
1655	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
1656	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
1657	lan743x_csr_write(adapter, DMAC_CFG, data);
1658	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
1659	data |= DMAC_COAL_CFG_TIMER_TX_START_;
1660	data |= DMAC_COAL_CFG_FLUSH_INTS_;
1661	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
1662	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
1663	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
1664	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
1665	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
1666	data = DMAC_OBFF_TX_THRES_SET_(0x08);
1667	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
1668	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
1669	return 0;
1670}
1671
1672static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1673				     int tx_channel)
1674{
1675	u32 dmac_cmd = 0;
1676
1677	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1678	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1679				      DMAC_CMD_START_T_(tx_channel)),
1680				      (dmac_cmd &
1681				      DMAC_CMD_STOP_T_(tx_channel)));
1682}
1683
1684static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1685					     int tx_channel)
1686{
1687	int timeout = 100;
1688	int result = 0;
1689
1690	while (timeout &&
1691	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1692	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1693		usleep_range(1000, 20000);
1694		timeout--;
1695	}
1696	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1697		result = -ENODEV;
1698	return result;
1699}
1700
1701static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1702				     int rx_channel)
1703{
1704	u32 dmac_cmd = 0;
1705
1706	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1707	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1708				      DMAC_CMD_START_R_(rx_channel)),
1709				      (dmac_cmd &
1710				      DMAC_CMD_STOP_R_(rx_channel)));
1711}
1712
1713static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1714					     int rx_channel)
1715{
1716	int timeout = 100;
1717	int result = 0;
1718
1719	while (timeout &&
1720	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1721	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1722		usleep_range(1000, 20000);
1723		timeout--;
1724	}
1725	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1726		result = -ENODEV;
1727	return result;
1728}
1729
1730static void lan743x_tx_release_desc(struct lan743x_tx *tx,
1731				    int descriptor_index, bool cleanup)
1732{
1733	struct lan743x_tx_buffer_info *buffer_info = NULL;
1734	struct lan743x_tx_descriptor *descriptor = NULL;
1735	u32 descriptor_type = 0;
1736	bool ignore_sync;
1737
1738	descriptor = &tx->ring_cpu_ptr[descriptor_index];
1739	buffer_info = &tx->buffer_info[descriptor_index];
1740	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
1741		goto done;
1742
1743	descriptor_type = le32_to_cpu(descriptor->data0) &
1744			  TX_DESC_DATA0_DTYPE_MASK_;
1745	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
1746		goto clean_up_data_descriptor;
1747	else
1748		goto clear_active;
1749
1750clean_up_data_descriptor:
1751	if (buffer_info->dma_ptr) {
1752		if (buffer_info->flags &
1753		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
1754			dma_unmap_page(&tx->adapter->pdev->dev,
1755				       buffer_info->dma_ptr,
1756				       buffer_info->buffer_length,
1757				       DMA_TO_DEVICE);
1758		} else {
1759			dma_unmap_single(&tx->adapter->pdev->dev,
1760					 buffer_info->dma_ptr,
1761					 buffer_info->buffer_length,
1762					 DMA_TO_DEVICE);
1763		}
1764		buffer_info->dma_ptr = 0;
1765		buffer_info->buffer_length = 0;
1766	}
1767	if (!buffer_info->skb)
1768		goto clear_active;
1769
1770	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
1771		dev_kfree_skb_any(buffer_info->skb);
1772		goto clear_skb;
1773	}
1774
1775	if (cleanup) {
1776		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
1777		dev_kfree_skb_any(buffer_info->skb);
1778	} else {
1779		ignore_sync = (buffer_info->flags &
1780			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
1781		lan743x_ptp_tx_timestamp_skb(tx->adapter,
1782					     buffer_info->skb, ignore_sync);
1783	}
1784
1785clear_skb:
1786	buffer_info->skb = NULL;
1787
1788clear_active:
1789	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
1790
1791done:
1792	memset(buffer_info, 0, sizeof(*buffer_info));
1793	memset(descriptor, 0, sizeof(*descriptor));
1794}
1795
1796static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1797{
1798	return ((++index) % tx->ring_size);
1799}
1800
1801static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1802{
1803	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
1804		lan743x_tx_release_desc(tx, tx->last_head, false);
1805		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1806	}
1807}
1808
1809static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1810{
1811	u32 original_head = 0;
1812
1813	original_head = tx->last_head;
1814	do {
1815		lan743x_tx_release_desc(tx, tx->last_head, true);
1816		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1817	} while (tx->last_head != original_head);
1818	memset(tx->ring_cpu_ptr, 0,
1819	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1820	memset(tx->buffer_info, 0,
1821	       sizeof(*tx->buffer_info) * (tx->ring_size));
1822}
1823
1824static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1825				   struct sk_buff *skb)
1826{
1827	int result = 1; /* 1 for the main skb buffer */
1828	int nr_frags = 0;
1829
1830	if (skb_is_gso(skb))
1831		result++; /* requires an extension descriptor */
1832	nr_frags = skb_shinfo(skb)->nr_frags;
1833	result += nr_frags; /* 1 for each fragment buffer */
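	/* e.g. a GSO skb with 3 page fragments needs
	 * 1 (head) + 1 (extension) + 3 (fragments) = 5 descriptors
	 */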
1834	return result;
1835}
1836
1837static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1838{
1839	int last_head = tx->last_head;
1840	int last_tail = tx->last_tail;
1841
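	/* one slot is always left unused so a full ring can be told apart
	 * from an empty one; e.g. with a hypothetical ring size of 64 and
	 * last_head == last_tail, 63 descriptors are reported available
	 */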
1842	if (last_tail >= last_head)
1843		return tx->ring_size - last_tail + last_head - 1;
1844	else
1845		return last_head - last_tail - 1;
1846}
1847
1848void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
1849				      bool enable_timestamping,
1850				      bool enable_onestep_sync)
1851{
1852	if (enable_timestamping)
1853		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
1854	else
1855		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
1856	if (enable_onestep_sync)
1857		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
1858	else
1859		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
1860}
1861
1862static int lan743x_tx_frame_start(struct lan743x_tx *tx,
1863				  unsigned char *first_buffer,
1864				  unsigned int first_buffer_length,
1865				  unsigned int frame_length,
1866				  bool time_stamp,
1867				  bool check_sum)
1868{
1869	/* called only from within lan743x_tx_xmit_frame.
1870	 * assuming tx->ring_lock has already been acquired.
1871	 */
1872	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1873	struct lan743x_tx_buffer_info *buffer_info = NULL;
1874	struct lan743x_adapter *adapter = tx->adapter;
1875	struct device *dev = &adapter->pdev->dev;
1876	dma_addr_t dma_ptr;
1877
1878	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
1879	tx->frame_first = tx->last_tail;
1880	tx->frame_tail = tx->frame_first;
1881
1882	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1883	buffer_info = &tx->buffer_info[tx->frame_tail];
1884	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
1885				 DMA_TO_DEVICE);
1886	if (dma_mapping_error(dev, dma_ptr))
1887		return -ENOMEM;
1888
1889	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1890	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1891	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1892		TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1893
1894	buffer_info->skb = NULL;
1895	buffer_info->dma_ptr = dma_ptr;
1896	buffer_info->buffer_length = first_buffer_length;
1897	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1898
1899	tx->frame_data0 = (first_buffer_length &
1900		TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1901		TX_DESC_DATA0_DTYPE_DATA_ |
1902		TX_DESC_DATA0_FS_ |
1903		TX_DESC_DATA0_FCS_;
1904	if (time_stamp)
1905		tx->frame_data0 |= TX_DESC_DATA0_TSE_;
1906
1907	if (check_sum)
1908		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
1909				   TX_DESC_DATA0_IPE_ |
1910				   TX_DESC_DATA0_TPE_;
1911
1912	/* data0 will be programmed in one of the other frame assembler functions */
1913	return 0;
1914}
1915
1916static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
1917				     unsigned int frame_length,
1918				     int nr_frags)
1919{
1920	/* called only from within lan743x_tx_xmit_frame.
1921	 * assuming tx->ring_lock has already been acquired.
1922	 */
1923	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1924	struct lan743x_tx_buffer_info *buffer_info = NULL;
1925
1926	/* wrap up previous descriptor */
1927	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
1928	if (nr_frags <= 0) {
1929		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1930		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1931	}
1932	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1933	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1934
1935	/* move to next descriptor */
1936	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1937	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1938	buffer_info = &tx->buffer_info[tx->frame_tail];
1939
1940	/* add extension descriptor */
1941	tx_descriptor->data1 = 0;
1942	tx_descriptor->data2 = 0;
1943	tx_descriptor->data3 = 0;
1944
1945	buffer_info->skb = NULL;
1946	buffer_info->dma_ptr = 0;
1947	buffer_info->buffer_length = 0;
1948	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1949
1950	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
1951			  TX_DESC_DATA0_DTYPE_EXT_ |
1952			  TX_DESC_DATA0_EXT_LSO_;
1953
1954	/* data0 will be programmed in one of the other frame assembler functions */
1955}
1956
1957static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
1958					 const skb_frag_t *fragment,
1959					 unsigned int frame_length)
1960{
1961	/* called only from within lan743x_tx_xmit_frame
1962	 * assuming tx->ring_lock has already been acquired
1963	 */
1964	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1965	struct lan743x_tx_buffer_info *buffer_info = NULL;
1966	struct lan743x_adapter *adapter = tx->adapter;
1967	struct device *dev = &adapter->pdev->dev;
1968	unsigned int fragment_length = 0;
1969	dma_addr_t dma_ptr;
1970
1971	fragment_length = skb_frag_size(fragment);
1972	if (!fragment_length)
1973		return 0;
1974
1975	/* wrap up previous descriptor */
1976	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1977	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1978
1979	/* move to next descriptor */
1980	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1981	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1982	buffer_info = &tx->buffer_info[tx->frame_tail];
1983	dma_ptr = skb_frag_dma_map(dev, fragment,
1984				   0, fragment_length,
1985				   DMA_TO_DEVICE);
1986	if (dma_mapping_error(dev, dma_ptr)) {
1987		int desc_index;
1988
1989		/* cleanup all previously setup descriptors */
1990		desc_index = tx->frame_first;
1991		while (desc_index != tx->frame_tail) {
1992			lan743x_tx_release_desc(tx, desc_index, true);
1993			desc_index = lan743x_tx_next_index(tx, desc_index);
1994		}
1995		dma_wmb();
1996		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1997		tx->frame_first = 0;
1998		tx->frame_data0 = 0;
1999		tx->frame_tail = 0;
2000		return -ENOMEM;
2001	}
2002
2003	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
2004	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
2005	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
2006			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
2007
2008	buffer_info->skb = NULL;
2009	buffer_info->dma_ptr = dma_ptr;
2010	buffer_info->buffer_length = fragment_length;
2011	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
2012	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
2013
2014	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
2015			  TX_DESC_DATA0_DTYPE_DATA_ |
2016			  TX_DESC_DATA0_FCS_;
2017
2018	/* data0 will be programmed in one of the other frame assembler functions */
2019	return 0;
2020}
2021
2022static void lan743x_tx_frame_end(struct lan743x_tx *tx,
2023				 struct sk_buff *skb,
2024				 bool time_stamp,
2025				 bool ignore_sync)
2026{
2027	/* called only from within lan743x_tx_xmit_frame
2028	 * assuming tx->ring_lock has already been acquired
2029	 */
2030	struct lan743x_tx_descriptor *tx_descriptor = NULL;
2031	struct lan743x_tx_buffer_info *buffer_info = NULL;
2032	struct lan743x_adapter *adapter = tx->adapter;
2033	u32 tx_tail_flags = 0;
2034
2035	/* wrap up previous descriptor */
2036	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
2037	    TX_DESC_DATA0_DTYPE_DATA_) {
2038		tx->frame_data0 |= TX_DESC_DATA0_LS_;
2039		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
2040	}
2041
2042	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
2043	buffer_info = &tx->buffer_info[tx->frame_tail];
2044	buffer_info->skb = skb;
2045	if (time_stamp)
2046		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
2047	if (ignore_sync)
2048		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
2049
2050	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
2051	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
2052	tx->last_tail = tx->frame_tail;
2053
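	/* ensure all descriptor writes above are visible to the device
	 * before the tail write below hands the descriptors over
	 */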
2054	dma_wmb();
2055
2056	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2057		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
2058	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
2059		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
2060		TX_TAIL_SET_TOP_INT_EN_;
2061
2062	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
2063			  tx_tail_flags | tx->frame_tail);
2064	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
2065}
2066
2067static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
2068					 struct sk_buff *skb)
2069{
2070	int required_number_of_descriptors = 0;
2071	unsigned int start_frame_length = 0;
2072	netdev_tx_t retval = NETDEV_TX_OK;
2073	unsigned int frame_length = 0;
2074	unsigned int head_length = 0;
2075	unsigned long irq_flags = 0;
2076	bool do_timestamp = false;
2077	bool ignore_sync = false;
2078	struct netdev_queue *txq;
2079	int nr_frags = 0;
2080	bool gso = false;
2081	int j;
2082
2083	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
2084
2085	spin_lock_irqsave(&tx->ring_lock, irq_flags);
2086	if (required_number_of_descriptors >
2087		lan743x_tx_get_avail_desc(tx)) {
2088		if (required_number_of_descriptors > (tx->ring_size - 1)) {
2089			dev_kfree_skb_irq(skb);
2090		} else {
2091			/* remember how many descriptors are needed to restart the queue */
2092			tx->rqd_descriptors = required_number_of_descriptors;
2093			retval = NETDEV_TX_BUSY;
2094			txq = netdev_get_tx_queue(tx->adapter->netdev,
2095						  tx->channel_number);
2096			netif_tx_stop_queue(txq);
2097		}
2098		goto unlock;
2099	}
2100
2101	/* space available, transmit skb */
2102	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2103	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
2104	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
2105		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2106		do_timestamp = true;
2107		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
2108			ignore_sync = true;
2109	}
2110	head_length = skb_headlen(skb);
2111	frame_length = skb_pagelen(skb);
2112	nr_frags = skb_shinfo(skb)->nr_frags;
2113	start_frame_length = frame_length;
2114	gso = skb_is_gso(skb);
2115	if (gso) {
2116		start_frame_length = max(skb_shinfo(skb)->gso_size,
2117					 (unsigned short)8);
2118	}
2119
2120	if (lan743x_tx_frame_start(tx,
2121				   skb->data, head_length,
2122				   start_frame_length,
2123				   do_timestamp,
2124				   skb->ip_summed == CHECKSUM_PARTIAL)) {
2125		dev_kfree_skb_irq(skb);
2126		goto unlock;
2127	}
2128	tx->frame_count++;
2129
2130	if (gso)
2131		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
2132
2133	if (nr_frags <= 0)
2134		goto finish;
2135
2136	for (j = 0; j < nr_frags; j++) {
2137		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
2138
2139		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
2140			/* on error there is no need to call
2141			 * lan743x_tx_frame_end; frame assembler clean up
2142			 * was already performed inside
2143			 * lan743x_tx_frame_add_fragment
2144			 */
2145			dev_kfree_skb_irq(skb);
2146			goto unlock;
2147		}
2148	}
2149
2150finish:
2151	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
2152
2153unlock:
2154	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
2155	return retval;
2156}
2157
2158static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
2159{
2160	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
2161	struct lan743x_adapter *adapter = tx->adapter;
2162	unsigned long irq_flags = 0;
2163	struct netdev_queue *txq;
2164	u32 ioc_bit = 0;
2165
2166	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
2167	lan743x_csr_read(adapter, DMAC_INT_STS);
2168	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
2169		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
2170	spin_lock_irqsave(&tx->ring_lock, irq_flags);
2171
2172	/* clean up tx ring */
2173	lan743x_tx_release_completed_descriptors(tx);
2174	txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
2175	if (netif_tx_queue_stopped(txq)) {
2176		if (tx->rqd_descriptors) {
2177			if (tx->rqd_descriptors <=
2178			    lan743x_tx_get_avail_desc(tx)) {
2179				tx->rqd_descriptors = 0;
2180				netif_tx_wake_queue(txq);
2181			}
2182		} else {
2183			netif_tx_wake_queue(txq);
2184		}
2185	}
2186	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
2187
2188	if (!napi_complete(napi))
2189		goto done;
2190
2191	/* enable isr */
2192	lan743x_csr_write(adapter, INT_EN_SET,
2193			  INT_BIT_DMA_TX_(tx->channel_number));
2194	lan743x_csr_read(adapter, INT_STS);
2195
2196done:
2197	return 0;
2198}
2199
2200static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
2201{
2202	if (tx->head_cpu_ptr) {
2203		dma_free_coherent(&tx->adapter->pdev->dev,
2204				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
2205				  tx->head_dma_ptr);
2206		tx->head_cpu_ptr = NULL;
2207		tx->head_dma_ptr = 0;
2208	}
2209	kfree(tx->buffer_info);
2210	tx->buffer_info = NULL;
2211
2212	if (tx->ring_cpu_ptr) {
2213		dma_free_coherent(&tx->adapter->pdev->dev,
2214				  tx->ring_allocation_size, tx->ring_cpu_ptr,
2215				  tx->ring_dma_ptr);
2216		tx->ring_allocation_size = 0;
2217		tx->ring_cpu_ptr = NULL;
2218		tx->ring_dma_ptr = 0;
2219	}
2220	tx->ring_size = 0;
2221}
2222
2223static int lan743x_tx_ring_init(struct lan743x_tx *tx)
2224{
2225	size_t ring_allocation_size = 0;
2226	void *cpu_ptr = NULL;
2227	dma_addr_t dma_ptr;
2228	int ret = -ENOMEM;
2229
2230	tx->ring_size = LAN743X_TX_RING_SIZE;
2231	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
2232		ret = -EINVAL;
2233		goto cleanup;
2234	}
2235	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
2236				      DMA_BIT_MASK(64))) {
2237		dev_warn(&tx->adapter->pdev->dev,
2238			 "lan743x_: No suitable DMA available\n");
2239		ret = -ENOMEM;
2240		goto cleanup;
2241	}
2242	ring_allocation_size = ALIGN(tx->ring_size *
2243				     sizeof(struct lan743x_tx_descriptor),
2244				     PAGE_SIZE);
2245	dma_ptr = 0;
2246	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2247				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2248	if (!cpu_ptr) {
2249		ret = -ENOMEM;
2250		goto cleanup;
2251	}
2252
2253	tx->ring_allocation_size = ring_allocation_size;
2254	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
2255	tx->ring_dma_ptr = dma_ptr;
2256
2257	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
2258	if (!cpu_ptr) {
2259		ret = -ENOMEM;
2260		goto cleanup;
2261	}
2262	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
2263	dma_ptr = 0;
2264	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2265				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
2266				     GFP_KERNEL);
2267	if (!cpu_ptr) {
2268		ret = -ENOMEM;
2269		goto cleanup;
2270	}
2271
2272	tx->head_cpu_ptr = cpu_ptr;
2273	tx->head_dma_ptr = dma_ptr;
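	/* the head write-back location must be 4-byte aligned */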
2274	if (tx->head_dma_ptr & 0x3) {
2275		ret = -ENOMEM;
2276		goto cleanup;
2277	}
2278
2279	return 0;
2280
2281cleanup:
2282	lan743x_tx_ring_cleanup(tx);
2283	return ret;
2284}
2285
2286static void lan743x_tx_close(struct lan743x_tx *tx)
2287{
2288	struct lan743x_adapter *adapter = tx->adapter;
2289
2290	lan743x_csr_write(adapter,
2291			  DMAC_CMD,
2292			  DMAC_CMD_STOP_T_(tx->channel_number));
2293	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
2294
2295	lan743x_csr_write(adapter,
2296			  DMAC_INT_EN_CLR,
2297			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
2298	lan743x_csr_write(adapter, INT_EN_CLR,
2299			  INT_BIT_DMA_TX_(tx->channel_number));
2300	napi_disable(&tx->napi);
2301	netif_napi_del(&tx->napi);
2302
2303	lan743x_csr_write(adapter, FCT_TX_CTL,
2304			  FCT_TX_CTL_DIS_(tx->channel_number));
2305	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
2306				 FCT_TX_CTL_EN_(tx->channel_number),
2307				 0, 1000, 20000, 100);
2308
2309	lan743x_tx_release_all_descriptors(tx);
2310
2311	tx->rqd_descriptors = 0;
2312
2313	lan743x_tx_ring_cleanup(tx);
2314}
2315
2316static int lan743x_tx_open(struct lan743x_tx *tx)
2317{
2318	struct lan743x_adapter *adapter = NULL;
2319	u32 data = 0;
2320	int ret;
2321
2322	adapter = tx->adapter;
2323	ret = lan743x_tx_ring_init(tx);
2324	if (ret)
2325		return ret;
2326
2327	/* initialize fifo */
2328	lan743x_csr_write(adapter, FCT_TX_CTL,
2329			  FCT_TX_CTL_RESET_(tx->channel_number));
2330	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
2331				 FCT_TX_CTL_RESET_(tx->channel_number),
2332				 0, 1000, 20000, 100);
2333
2334	/* enable fifo */
2335	lan743x_csr_write(adapter, FCT_TX_CTL,
2336			  FCT_TX_CTL_EN_(tx->channel_number));
2337
2338	/* reset tx channel */
2339	lan743x_csr_write(adapter, DMAC_CMD,
2340			  DMAC_CMD_TX_SWR_(tx->channel_number));
2341	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2342				 DMAC_CMD_TX_SWR_(tx->channel_number),
2343				 0, 1000, 20000, 100);
2344
2345	/* Write TX_BASE_ADDR */
2346	lan743x_csr_write(adapter,
2347			  TX_BASE_ADDRH(tx->channel_number),
2348			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
2349	lan743x_csr_write(adapter,
2350			  TX_BASE_ADDRL(tx->channel_number),
2351			  DMA_ADDR_LOW32(tx->ring_dma_ptr));
2352
2353	/* Write TX_CFG_B */
2354	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
2355	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
2356	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
2357	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2358		data |= TX_CFG_B_TDMABL_512_;
2359	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
2360
2361	/* Write TX_CFG_A */
2362	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
2363	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2364		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
2365		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
2366		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
2367		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
2368	}
2369	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
2370
2371	/* Write TX_HEAD_WRITEBACK_ADDR */
2372	lan743x_csr_write(adapter,
2373			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
2374			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
2375	lan743x_csr_write(adapter,
2376			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
2377			  DMA_ADDR_LOW32(tx->head_dma_ptr));
2378
2379	/* set last head */
2380	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
2381
2382	/* write TX_TAIL */
2383	tx->last_tail = 0;
2384	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
2385			  (u32)(tx->last_tail));
2386	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2387							 INT_BIT_DMA_TX_
2388							 (tx->channel_number));
2389	netif_napi_add_tx_weight(adapter->netdev,
2390				 &tx->napi, lan743x_tx_napi_poll,
2391				 NAPI_POLL_WEIGHT);
2392	napi_enable(&tx->napi);
2393
2394	data = 0;
2395	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2396		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
2397	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2398		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
2399	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2400		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
2401	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2402		data |= TX_CFG_C_TX_INT_EN_R2C_;
2403	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
2404
2405	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
2406		lan743x_csr_write(adapter, INT_EN_SET,
2407				  INT_BIT_DMA_TX_(tx->channel_number));
2408	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2409			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
2410
2411	/* start dmac channel */
2412	lan743x_csr_write(adapter, DMAC_CMD,
2413			  DMAC_CMD_START_T_(tx->channel_number));
2414	return 0;
2415}
2416
2417static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
2418{
2419	return ((++index) % rx->ring_size);
2420}
2421
2422static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
2423{
2424	/* update the tail once per 8 descriptors */
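	/* e.g. the doorbell is written at indices 7, 15, 23, ... batching
	 * one register write per 8 refilled descriptors
	 */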
2425	if ((index & 7) == 7)
2426		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
2427				  index);
2428}
2429
2430static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
2431					gfp_t gfp)
2432{
2433	struct net_device *netdev = rx->adapter->netdev;
2434	struct device *dev = &rx->adapter->pdev->dev;
2435	struct lan743x_rx_buffer_info *buffer_info;
2436	unsigned int buffer_length, used_length;
2437	struct lan743x_rx_descriptor *descriptor;
2438	struct sk_buff *skb;
2439	dma_addr_t dma_ptr;
2440
2441	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
2442
2443	descriptor = &rx->ring_cpu_ptr[index];
2444	buffer_info = &rx->buffer_info[index];
2445	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
2446	if (!skb)
2447		return -ENOMEM;
2448	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
2449	if (dma_mapping_error(dev, dma_ptr)) {
2450		dev_kfree_skb_any(skb);
2451		return -ENOMEM;
2452	}
2453	if (buffer_info->dma_ptr) {
2454		/* sync used area of buffer only */
2455		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
2456			/* frame length is valid only if LS bit is set.
2457			 * It's a safe upper bound for the used area in this
2458			 * buffer.
2459			 */
2460			used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
2461					  (le32_to_cpu(descriptor->data0)),
2462					  buffer_info->buffer_length);
2463		else
2464			used_length = buffer_info->buffer_length;
2465		dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
2466					used_length,
2467					DMA_FROM_DEVICE);
2468		dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
2469				       buffer_info->buffer_length,
2470				       DMA_FROM_DEVICE,
2471				       DMA_ATTR_SKIP_CPU_SYNC);
2472	}
2473
2474	buffer_info->skb = skb;
2475	buffer_info->dma_ptr = dma_ptr;
2476	buffer_info->buffer_length = buffer_length;
2477	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2478	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2479	descriptor->data3 = 0;
2480	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2481			    (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2482	lan743x_rx_update_tail(rx, index);
2483
2484	return 0;
2485}
2486
2487static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
2488{
2489	struct lan743x_rx_buffer_info *buffer_info;
2490	struct lan743x_rx_descriptor *descriptor;
2491
2492	descriptor = &rx->ring_cpu_ptr[index];
2493	buffer_info = &rx->buffer_info[index];
2494
2495	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2496	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2497	descriptor->data3 = 0;
2498	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2499			    ((buffer_info->buffer_length) &
2500			    RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2501	lan743x_rx_update_tail(rx, index);
2502}
2503
2504static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
2505{
2506	struct lan743x_rx_buffer_info *buffer_info;
2507	struct lan743x_rx_descriptor *descriptor;
2508
2509	descriptor = &rx->ring_cpu_ptr[index];
2510	buffer_info = &rx->buffer_info[index];
2511
2512	memset(descriptor, 0, sizeof(*descriptor));
2513
2514	if (buffer_info->dma_ptr) {
2515		dma_unmap_single(&rx->adapter->pdev->dev,
2516				 buffer_info->dma_ptr,
2517				 buffer_info->buffer_length,
2518				 DMA_FROM_DEVICE);
2519		buffer_info->dma_ptr = 0;
2520	}
2521
2522	if (buffer_info->skb) {
2523		dev_kfree_skb(buffer_info->skb);
2524		buffer_info->skb = NULL;
2525	}
2526
2527	memset(buffer_info, 0, sizeof(*buffer_info));
2528}
2529
2530static struct sk_buff *
2531lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
2532{
2533	if (skb_linearize(skb)) {
2534		dev_kfree_skb_irq(skb);
2535		return NULL;
2536	}
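	/* the reported frame length includes the 4-byte FCS, which is not
	 * handed to the stack; e.g. a 64-byte wire frame becomes a 60-byte
	 * skb here
	 */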
2537	frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
2538	if (skb->len > frame_length) {
2539		skb->tail -= skb->len - frame_length;
2540		skb->len = frame_length;
2541	}
2542	return skb;
2543}
2544
2545static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
2546{
2547	int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
2548	struct lan743x_rx_descriptor *descriptor, *desc_ext;
2549	struct net_device *netdev = rx->adapter->netdev;
2550	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2551	struct lan743x_rx_buffer_info *buffer_info;
2552	int frame_length, buffer_length;
2553	bool is_ice, is_tce, is_icsm;
2554	int extension_index = -1;
2555	bool is_last, is_first;
2556	struct sk_buff *skb;
2557
2558	if (current_head_index < 0 || current_head_index >= rx->ring_size)
2559		goto done;
2560
2561	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
2562		goto done;
2563
2564	if (rx->last_head == current_head_index)
2565		goto done;
2566
2567	descriptor = &rx->ring_cpu_ptr[rx->last_head];
2568	if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
2569		goto done;
2570	buffer_info = &rx->buffer_info[rx->last_head];
2571
2572	is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
2573	is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;
2574
2575	if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
2576		/* extension is expected to follow */
2577		int index = lan743x_rx_next_index(rx, rx->last_head);
2578
2579		if (index == current_head_index)
2580			/* extension not yet available */
2581			goto done;
2582		desc_ext = &rx->ring_cpu_ptr[index];
2583		if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
2584			/* extension not yet available */
2585			goto done;
2586		if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
2587			goto move_forward;
2588		extension_index = index;
2589	}
2590
2591	/* Only the last buffer in a multi-buffer frame contains the total frame
2592	 * length. The chip occasionally sends more buffers than strictly
2593	 * required to reach the total frame length.
2594	 * Handle this by adding all buffers to the skb in their entirety.
2595	 * Once the real frame length is known, trim the skb.
2596	 */
2597	frame_length =
2598		RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
2599	buffer_length = buffer_info->buffer_length;
2600	is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
2601	is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
2602	is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;
2603
2604	netdev_dbg(netdev, "%s%schunk: %d/%d",
2605		   is_first ? "first " : "      ",
2606		   is_last  ? "last  " : "      ",
2607		   frame_length, buffer_length);
2608
2609	/* save existing skb, allocate new skb and map to dma */
2610	skb = buffer_info->skb;
2611	if (lan743x_rx_init_ring_element(rx, rx->last_head,
2612					 GFP_ATOMIC | GFP_DMA)) {
2613		/* failed to allocate next skb.
2614		 * Memory is very low.
2615		 * Drop this packet and reuse buffer.
2616		 */
2617		lan743x_rx_reuse_ring_element(rx, rx->last_head);
2618		/* drop packet that was being assembled */
2619		dev_kfree_skb_irq(rx->skb_head);
2620		rx->skb_head = NULL;
2621		goto process_extension;
2622	}
2623
2624	/* add buffers to skb via skb->frag_list */
2625	if (is_first) {
2626		skb_reserve(skb, RX_HEAD_PADDING);
2627		skb_put(skb, buffer_length - RX_HEAD_PADDING);
2628		if (rx->skb_head)
2629			dev_kfree_skb_irq(rx->skb_head);
2630		rx->skb_head = skb;
2631	} else if (rx->skb_head) {
2632		skb_put(skb, buffer_length);
2633		if (skb_shinfo(rx->skb_head)->frag_list)
2634			rx->skb_tail->next = skb;
2635		else
2636			skb_shinfo(rx->skb_head)->frag_list = skb;
2637		rx->skb_tail = skb;
2638		rx->skb_head->len += skb->len;
2639		rx->skb_head->data_len += skb->len;
2640		rx->skb_head->truesize += skb->truesize;
2641	} else {
2642		/* packet to assemble has already been dropped because one or
2643		 * more of its buffers could not be allocated
2644		 */
2645		netdev_dbg(netdev, "drop buffer intended for dropped packet");
2646		dev_kfree_skb_irq(skb);
2647	}
2648
2649process_extension:
2650	if (extension_index >= 0) {
2651		u32 ts_sec;
2652		u32 ts_nsec;
2653
2654		ts_sec = le32_to_cpu(desc_ext->data1);
2655		ts_nsec = (le32_to_cpu(desc_ext->data2) &
2656			  RX_DESC_DATA2_TS_NS_MASK_);
2657		if (rx->skb_head)
2658			skb_hwtstamps(rx->skb_head)->hwtstamp =
2659				ktime_set(ts_sec, ts_nsec);
2660		lan743x_rx_reuse_ring_element(rx, extension_index);
2661		rx->last_head = extension_index;
2662		netdev_dbg(netdev, "process extension");
2663	}
2664
2665	if (is_last && rx->skb_head)
2666		rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);
2667
2668	if (is_last && rx->skb_head) {
2669		rx->skb_head->protocol = eth_type_trans(rx->skb_head,
2670							rx->adapter->netdev);
2671		if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
2672			if (!is_ice && !is_tce && !is_icsm)
2673				skb->ip_summed = CHECKSUM_UNNECESSARY;
2674		}
2675		netdev_dbg(netdev, "sending %d byte frame to OS",
2676			   rx->skb_head->len);
2677		napi_gro_receive(&rx->napi, rx->skb_head);
2678		rx->skb_head = NULL;
2679	}
2680
2681move_forward:
2682	/* push tail and head forward */
2683	rx->last_tail = rx->last_head;
2684	rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
2685	result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
2686done:
2687	return result;
2688}
2689
2690static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
2691{
2692	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
2693	struct lan743x_adapter *adapter = rx->adapter;
2694	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2695	u32 rx_tail_flags = 0;
2696	int count;
2697
2698	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
2699		/* clear int status bit before reading packet */
2700		lan743x_csr_write(adapter, DMAC_INT_STS,
2701				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2702	}
2703	for (count = 0; count < weight; count++) {
2704		result = lan743x_rx_process_buffer(rx);
2705		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
2706			break;
2707	}
2708	rx->frame_count += count;
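	/* if the whole budget was consumed there may still be work queued,
	 * so stay scheduled and skip completing NAPI / re-arming interrupts
	 */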
2709	if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
2710		return weight;
2711
2712	if (!napi_complete_done(napi, count))
2713		return count;
2714
2715	/* re-arm interrupts, must write to rx tail on some chip variants */
2716	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2717		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
2718	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
2719		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
2720	} else {
2721		lan743x_csr_write(adapter, INT_EN_SET,
2722				  INT_BIT_DMA_RX_(rx->channel_number));
2723	}
2724
2725	if (rx_tail_flags)
2726		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2727				  rx_tail_flags | rx->last_tail);
2728
2729	return count;
2730}
2731
2732static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
2733{
2734	if (rx->buffer_info && rx->ring_cpu_ptr) {
2735		int index;
2736
2737		for (index = 0; index < rx->ring_size; index++)
2738			lan743x_rx_release_ring_element(rx, index);
2739	}
2740
2741	if (rx->head_cpu_ptr) {
2742		dma_free_coherent(&rx->adapter->pdev->dev,
2743				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
2744				  rx->head_dma_ptr);
2745		rx->head_cpu_ptr = NULL;
2746		rx->head_dma_ptr = 0;
2747	}
2748
2749	kfree(rx->buffer_info);
2750	rx->buffer_info = NULL;
2751
2752	if (rx->ring_cpu_ptr) {
2753		dma_free_coherent(&rx->adapter->pdev->dev,
2754				  rx->ring_allocation_size, rx->ring_cpu_ptr,
2755				  rx->ring_dma_ptr);
2756		rx->ring_allocation_size = 0;
2757		rx->ring_cpu_ptr = NULL;
2758		rx->ring_dma_ptr = 0;
2759	}
2760
2761	rx->ring_size = 0;
2762	rx->last_head = 0;
2763}
2764
2765static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2766{
2767	size_t ring_allocation_size = 0;
2768	dma_addr_t dma_ptr = 0;
2769	void *cpu_ptr = NULL;
2770	int ret = -ENOMEM;
2771	int index = 0;
2772
2773	rx->ring_size = LAN743X_RX_RING_SIZE;
2774	if (rx->ring_size <= 1) {
2775		ret = -EINVAL;
2776		goto cleanup;
2777	}
2778	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2779		ret = -EINVAL;
2780		goto cleanup;
2781	}
2782	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
2783				      DMA_BIT_MASK(64))) {
2784		dev_warn(&rx->adapter->pdev->dev,
2785			 "lan743x_: No suitable DMA available\n");
2786		ret = -ENOMEM;
2787		goto cleanup;
2788	}
2789	ring_allocation_size = ALIGN(rx->ring_size *
2790				     sizeof(struct lan743x_rx_descriptor),
2791				     PAGE_SIZE);
2792	dma_ptr = 0;
2793	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2794				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2795	if (!cpu_ptr) {
2796		ret = -ENOMEM;
2797		goto cleanup;
2798	}
2799	rx->ring_allocation_size = ring_allocation_size;
2800	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2801	rx->ring_dma_ptr = dma_ptr;
2802
2803	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
2804			  GFP_KERNEL);
2805	if (!cpu_ptr) {
2806		ret = -ENOMEM;
2807		goto cleanup;
2808	}
2809	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2810	dma_ptr = 0;
2811	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2812				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
2813				     GFP_KERNEL);
2814	if (!cpu_ptr) {
2815		ret = -ENOMEM;
2816		goto cleanup;
2817	}
2818
2819	rx->head_cpu_ptr = cpu_ptr;
2820	rx->head_dma_ptr = dma_ptr;
2821	if (rx->head_dma_ptr & 0x3) {
2822		ret = -ENOMEM;
2823		goto cleanup;
2824	}
2825
2826	rx->last_head = 0;
2827	for (index = 0; index < rx->ring_size; index++) {
2828		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
2829		if (ret)
2830			goto cleanup;
2831	}
2832	return 0;
2833
2834cleanup:
2835	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
2836		   "Error allocating memory for LAN743x\n");
2837
2838	lan743x_rx_ring_cleanup(rx);
2839	return ret;
2840}
2841
2842static void lan743x_rx_close(struct lan743x_rx *rx)
2843{
2844	struct lan743x_adapter *adapter = rx->adapter;
2845
2846	lan743x_csr_write(adapter, FCT_RX_CTL,
2847			  FCT_RX_CTL_DIS_(rx->channel_number));
2848	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2849				 FCT_RX_CTL_EN_(rx->channel_number),
2850				 0, 1000, 20000, 100);
2851
2852	lan743x_csr_write(adapter, DMAC_CMD,
2853			  DMAC_CMD_STOP_R_(rx->channel_number));
2854	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
2855
2856	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
2857			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2858	lan743x_csr_write(adapter, INT_EN_CLR,
2859			  INT_BIT_DMA_RX_(rx->channel_number));
2860	napi_disable(&rx->napi);
2861
2862	netif_napi_del(&rx->napi);
2863
2864	lan743x_rx_ring_cleanup(rx);
2865}
2866
2867static int lan743x_rx_open(struct lan743x_rx *rx)
2868{
2869	struct lan743x_adapter *adapter = rx->adapter;
2870	u32 data = 0;
2871	int ret;
2872
2873	rx->frame_count = 0;
2874	ret = lan743x_rx_ring_init(rx);
2875	if (ret)
2876		goto return_error;
2877
2878	netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);
2879
2880	lan743x_csr_write(adapter, DMAC_CMD,
2881			  DMAC_CMD_RX_SWR_(rx->channel_number));
2882	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2883				 DMAC_CMD_RX_SWR_(rx->channel_number),
2884				 0, 1000, 20000, 100);
2885
2886	/* set ring base address */
2887	lan743x_csr_write(adapter,
2888			  RX_BASE_ADDRH(rx->channel_number),
2889			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
2890	lan743x_csr_write(adapter,
2891			  RX_BASE_ADDRL(rx->channel_number),
2892			  DMA_ADDR_LOW32(rx->ring_dma_ptr));
2893
2894	/* set rx write back address */
2895	lan743x_csr_write(adapter,
2896			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
2897			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
2898	lan743x_csr_write(adapter,
2899			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
2900			  DMA_ADDR_LOW32(rx->head_dma_ptr));
2901	data = RX_CFG_A_RX_HP_WB_EN_;
2902	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2903		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
2904			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
2905			RX_CFG_A_RX_PF_THRES_SET_(16) |
2906			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
2907	}
2908
2909	/* set RX_CFG_A */
2910	lan743x_csr_write(adapter,
2911			  RX_CFG_A(rx->channel_number), data);
2912
2913	/* set RX_CFG_B */
2914	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
2915	data &= ~RX_CFG_B_RX_PAD_MASK_;
2916	if (!RX_HEAD_PADDING)
2917		data |= RX_CFG_B_RX_PAD_0_;
2918	else
2919		data |= RX_CFG_B_RX_PAD_2_;
2920	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
2921	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
2922	data |= RX_CFG_B_TS_ALL_RX_;
2923	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2924		data |= RX_CFG_B_RDMABL_512_;
2925
2926	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
2927	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2928							 INT_BIT_DMA_RX_
2929							 (rx->channel_number));
2930
2931	/* set RX_CFG_C */
2932	data = 0;
2933	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2934		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
2935	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2936		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
2937	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2938		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
2939	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2940		data |= RX_CFG_C_RX_INT_EN_R2C_;
2941	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
2942
2943	rx->last_tail = ((u32)(rx->ring_size - 1));
2944	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2945			  rx->last_tail);
2946	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
2947	if (rx->last_head) {
2948		ret = -EIO;
2949		goto napi_delete;
2950	}
2951
2952	napi_enable(&rx->napi);
2953
2954	lan743x_csr_write(adapter, INT_EN_SET,
2955			  INT_BIT_DMA_RX_(rx->channel_number));
2956	lan743x_csr_write(adapter, DMAC_INT_STS,
2957			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2958	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2959			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2960	lan743x_csr_write(adapter, DMAC_CMD,
2961			  DMAC_CMD_START_R_(rx->channel_number));
2962
2963	/* initialize fifo */
2964	lan743x_csr_write(adapter, FCT_RX_CTL,
2965			  FCT_RX_CTL_RESET_(rx->channel_number));
2966	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2967				 FCT_RX_CTL_RESET_(rx->channel_number),
2968				 0, 1000, 20000, 100);
2969	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
2970			  FCT_FLOW_CTL_REQ_EN_ |
2971			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
2972			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
2973
2974	/* enable fifo */
2975	lan743x_csr_write(adapter, FCT_RX_CTL,
2976			  FCT_RX_CTL_EN_(rx->channel_number));
2977	return 0;
2978
2979napi_delete:
2980	netif_napi_del(&rx->napi);
2981	lan743x_rx_ring_cleanup(rx);
2982
2983return_error:
2984	return ret;
2985}
2986
2987static int lan743x_netdev_close(struct net_device *netdev)
2988{
2989	struct lan743x_adapter *adapter = netdev_priv(netdev);
2990	int index;
2991
2992	for (index = 0; index < adapter->used_tx_channels; index++)
2993		lan743x_tx_close(&adapter->tx[index]);
2994
2995	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
2996		lan743x_rx_close(&adapter->rx[index]);
2997
2998	lan743x_ptp_close(adapter);
2999
3000	lan743x_phy_close(adapter);
3001
3002	lan743x_mac_close(adapter);
3003
3004	lan743x_intr_close(adapter);
3005
3006	return 0;
3007}
3008
3009static int lan743x_netdev_open(struct net_device *netdev)
3010{
3011	struct lan743x_adapter *adapter = netdev_priv(netdev);
3012	int index;
3013	int ret;
3014
3015	ret = lan743x_intr_open(adapter);
3016	if (ret)
3017		goto return_error;
3018
3019	ret = lan743x_mac_open(adapter);
3020	if (ret)
3021		goto close_intr;
3022
3023	ret = lan743x_phy_open(adapter);
3024	if (ret)
3025		goto close_mac;
3026
3027	ret = lan743x_ptp_open(adapter);
3028	if (ret)
3029		goto close_phy;
3030
3031	lan743x_rfe_open(adapter);
3032
3033	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3034		ret = lan743x_rx_open(&adapter->rx[index]);
3035		if (ret)
3036			goto close_rx;
3037	}
3038
3039	for (index = 0; index < adapter->used_tx_channels; index++) {
3040		ret = lan743x_tx_open(&adapter->tx[index]);
3041		if (ret)
3042			goto close_tx;
3043	}
3044	return 0;
3045
3046close_tx:
3047	for (index = 0; index < adapter->used_tx_channels; index++) {
3048		if (adapter->tx[index].ring_cpu_ptr)
3049			lan743x_tx_close(&adapter->tx[index]);
3050	}
3051
3052close_rx:
3053	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3054		if (adapter->rx[index].ring_cpu_ptr)
3055			lan743x_rx_close(&adapter->rx[index]);
3056	}
3057	lan743x_ptp_close(adapter);
3058
3059close_phy:
3060	lan743x_phy_close(adapter);
3061
3062close_mac:
3063	lan743x_mac_close(adapter);
3064
3065close_intr:
3066	lan743x_intr_close(adapter);
3067
3068return_error:
3069	netif_warn(adapter, ifup, adapter->netdev,
3070		   "Error opening LAN743x\n");
3071	return ret;
3072}
3073
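/* On PCI11x1x parts several TX DMA channels are available, so the queue
 * selected by the stack (skb->queue_mapping) is mapped onto a channel;
 * other chips always transmit on channel 0.
 */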
3074static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
3075					     struct net_device *netdev)
3076{
3077	struct lan743x_adapter *adapter = netdev_priv(netdev);
3078	u8 ch = 0;
3079
3080	if (adapter->is_pci11x1x)
3081		ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
3082
3083	return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
3084}
3085
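/* SIOCSHWTSTAMP is handled by the driver's PTP code; every other MII ioctl
 * is forwarded to the attached PHY.  The interface must be up.
 */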
3086static int lan743x_netdev_ioctl(struct net_device *netdev,
3087				struct ifreq *ifr, int cmd)
3088{
3089	if (!netif_running(netdev))
3090		return -EINVAL;
3091	if (cmd == SIOCSHWTSTAMP)
3092		return lan743x_ptp_ioctl(netdev, ifr, cmd);
3093	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
3094}
3095
3096static void lan743x_netdev_set_multicast(struct net_device *netdev)
3097{
3098	struct lan743x_adapter *adapter = netdev_priv(netdev);
3099
3100	lan743x_rfe_set_multicast(adapter);
3101}
3102
3103static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
3104{
3105	struct lan743x_adapter *adapter = netdev_priv(netdev);
3106	int ret = 0;
3107
3108	ret = lan743x_mac_set_mtu(adapter, new_mtu);
3109	if (!ret)
3110		netdev->mtu = new_mtu;
3111	return ret;
3112}
3113
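/* Statistics are read straight from the MAC hardware counters.  The byte
 * counts are the sum of the unicast, broadcast and multicast counters, and
 * the error counts aggregate the individual FCS/alignment/size counters
 * kept by the hardware.
 */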
3114static void lan743x_netdev_get_stats64(struct net_device *netdev,
3115				       struct rtnl_link_stats64 *stats)
3116{
3117	struct lan743x_adapter *adapter = netdev_priv(netdev);
3118
3119	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
3120	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
3121	stats->rx_bytes = lan743x_csr_read(adapter,
3122					   STAT_RX_UNICAST_BYTE_COUNT) +
3123			  lan743x_csr_read(adapter,
3124					   STAT_RX_BROADCAST_BYTE_COUNT) +
3125			  lan743x_csr_read(adapter,
3126					   STAT_RX_MULTICAST_BYTE_COUNT);
3127	stats->tx_bytes = lan743x_csr_read(adapter,
3128					   STAT_TX_UNICAST_BYTE_COUNT) +
3129			  lan743x_csr_read(adapter,
3130					   STAT_TX_BROADCAST_BYTE_COUNT) +
3131			  lan743x_csr_read(adapter,
3132					   STAT_TX_MULTICAST_BYTE_COUNT);
3133	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
3134			   lan743x_csr_read(adapter,
3135					    STAT_RX_ALIGNMENT_ERRORS) +
3136			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
3137			   lan743x_csr_read(adapter,
3138					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
3139			   lan743x_csr_read(adapter,
3140					    STAT_RX_OVERSIZE_FRAME_ERRORS);
3141	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
3142			   lan743x_csr_read(adapter,
3143					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
3144			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
3145	stats->rx_dropped = lan743x_csr_read(adapter,
3146					     STAT_RX_DROPPED_FRAMES);
3147	stats->tx_dropped = lan743x_csr_read(adapter,
3148					     STAT_TX_EXCESSIVE_COLLISION);
3149	stats->multicast = lan743x_csr_read(adapter,
3150					    STAT_RX_MULTICAST_FRAMES) +
3151			   lan743x_csr_read(adapter,
3152					    STAT_TX_MULTICAST_FRAMES);
3153	stats->collisions = lan743x_csr_read(adapter,
3154					     STAT_TX_SINGLE_COLLISIONS) +
3155			    lan743x_csr_read(adapter,
3156					     STAT_TX_MULTIPLE_COLLISIONS) +
3157			    lan743x_csr_read(adapter,
3158					     STAT_TX_LATE_COLLISIONS);
3159}
3160
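/* Changing the MAC address validates the new address, writes it to the MAC
 * registers and then updates the receive filtering engine (RFE) so that it
 * matches the new address.
 */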
3161static int lan743x_netdev_set_mac_address(struct net_device *netdev,
3162					  void *addr)
3163{
3164	struct lan743x_adapter *adapter = netdev_priv(netdev);
3165	struct sockaddr *sock_addr = addr;
3166	int ret;
3167
3168	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
3169	if (ret)
3170		return ret;
3171	eth_hw_addr_set(netdev, sock_addr->sa_data);
3172	lan743x_mac_set_address(adapter, sock_addr->sa_data);
3173	lan743x_rfe_update_mac_address(adapter);
3174	return 0;
3175}
3176
3177static const struct net_device_ops lan743x_netdev_ops = {
3178	.ndo_open		= lan743x_netdev_open,
3179	.ndo_stop		= lan743x_netdev_close,
3180	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
3181	.ndo_eth_ioctl		= lan743x_netdev_ioctl,
3182	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
3183	.ndo_change_mtu		= lan743x_netdev_change_mtu,
3184	.ndo_get_stats64	= lan743x_netdev_get_stats64,
3185	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
3186};
3187
3188static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
3189{
3190	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
3191}
3192
3193static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
3194{
3195	mdiobus_unregister(adapter->mdiobus);
3196}
3197
3198static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
3199{
3200	unregister_netdev(adapter->netdev);
3201
3202	lan743x_mdiobus_cleanup(adapter);
3203	lan743x_hardware_cleanup(adapter);
3204	lan743x_pci_cleanup(adapter);
3205}
3206
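/* One-time hardware setup, also re-run on PM resume: channel and
 * interrupt-vector limits are chosen per chip (PCI11x1x vs. LAN743x), all
 * interrupts are masked, and the GPIO, MAC, PHY, PTP and DMA controller
 * blocks are initialised before the per-channel state is prepared.
 */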
3207static int lan743x_hardware_init(struct lan743x_adapter *adapter,
3208				 struct pci_dev *pdev)
3209{
3210	struct lan743x_tx *tx;
3211	int index;
3212	int ret;
3213
3214	adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
3215	if (adapter->is_pci11x1x) {
3216		adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
3217		adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
3218		adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
3219		pci11x1x_strap_get_status(adapter);
3220		spin_lock_init(&adapter->eth_syslock_spinlock);
3221		mutex_init(&adapter->sgmii_rw_lock);
3222	} else {
3223		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
3224		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
3225		adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
3226	}
3227
3228	adapter->intr.irq = adapter->pdev->irq;
3229	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
3230
3231	ret = lan743x_gpio_init(adapter);
3232	if (ret)
3233		return ret;
3234
3235	ret = lan743x_mac_init(adapter);
3236	if (ret)
3237		return ret;
3238
3239	ret = lan743x_phy_init(adapter);
3240	if (ret)
3241		return ret;
3242
3243	ret = lan743x_ptp_init(adapter);
3244	if (ret)
3245		return ret;
3246
3247	lan743x_rfe_update_mac_address(adapter);
3248
3249	ret = lan743x_dmac_init(adapter);
3250	if (ret)
3251		return ret;
3252
3253	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3254		adapter->rx[index].adapter = adapter;
3255		adapter->rx[index].channel_number = index;
3256	}
3257
3258	for (index = 0; index < adapter->used_tx_channels; index++) {
3259		tx = &adapter->tx[index];
3260		tx->adapter = adapter;
3261		tx->channel_number = index;
3262		spin_lock_init(&tx->ring_lock);
3263	}
3264
3265	return 0;
3266}
3267
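/* MDIO bus registration.  On PCI11x1x the SGMII strap selects between
 * Clause 45 capable accessors (SGMII) and Clause 22 only accessors (RGMII),
 * and the SGMII block is powered up or down accordingly.  LAN7430 restricts
 * the bus scan to address 1, where its internal PHY lives.
 */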
3268static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
3269{
3270	u32 sgmii_ctl;
3271	int ret;
3272
3273	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
3274	if (!(adapter->mdiobus)) {
3275		ret = -ENOMEM;
3276		goto return_error;
3277	}
3278
3279	adapter->mdiobus->priv = (void *)adapter;
3280	if (adapter->is_pci11x1x) {
3281		if (adapter->is_sgmii_en) {
3282			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
3283			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
3284			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
3285			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
3286			netif_dbg(adapter, drv, adapter->netdev,
3287				  "SGMII operation\n");
3288			adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
3289			adapter->mdiobus->read = lan743x_mdiobus_c45_read;
3290			adapter->mdiobus->write = lan743x_mdiobus_c45_write;
3291			adapter->mdiobus->name = "lan743x-mdiobus-c45";
3292			netif_dbg(adapter, drv, adapter->netdev,
3293				  "lan743x-mdiobus-c45\n");
3294		} else {
3295			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
3296			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
3297			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
3298			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
3299			netif_dbg(adapter, drv, adapter->netdev,
3300				  "RGMII operation\n");
3301			/* Only C22 is supported when using the RGMII interface */
3302			adapter->mdiobus->probe_capabilities = MDIOBUS_C22;
3303			adapter->mdiobus->read = lan743x_mdiobus_read;
3304			adapter->mdiobus->write = lan743x_mdiobus_write;
3305			adapter->mdiobus->name = "lan743x-mdiobus";
3306			netif_dbg(adapter, drv, adapter->netdev,
3307				  "lan743x-mdiobus\n");
3308		}
3309	} else {
3310		adapter->mdiobus->read = lan743x_mdiobus_read;
3311		adapter->mdiobus->write = lan743x_mdiobus_write;
3312		adapter->mdiobus->name = "lan743x-mdiobus";
3313		netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
3314	}
3315
3316	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
3317		 "pci-%s", pci_name(adapter->pdev));
3318
3319	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
3320		/* LAN7430 uses internal phy at address 1 */
3321		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
3322
3323	/* register mdiobus */
3324	ret = mdiobus_register(adapter->mdiobus);
3325	if (ret < 0)
3326		goto return_error;
3327	return 0;
3328
3329return_error:
3330	return ret;
3331}
3332
3333/* lan743x_pcidev_probe - Device Initialization Routine
3334 * @pdev: PCI device information struct
3335 * @id: entry in lan743x_pci_tbl
3336 *
3337 * Returns 0 on success, negative on failure
3338 *
3339 * initializes an adapter identified by a pci_dev structure.
3340 * The OS initialization, configuring of the adapter private structure,
3341 * and a hardware reset occur.
3342 **/
3343static int lan743x_pcidev_probe(struct pci_dev *pdev,
3344				const struct pci_device_id *id)
3345{
3346	struct lan743x_adapter *adapter = NULL;
3347	struct net_device *netdev = NULL;
3348	int ret = -ENODEV;
3349
3350	if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
3351	    id->device == PCI_DEVICE_ID_SMSC_A041) {
3352		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
3353						 sizeof(struct lan743x_adapter),
3354						 PCI11X1X_USED_TX_CHANNELS,
3355						 LAN743X_USED_RX_CHANNELS);
3356	} else {
3357		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
3358						 sizeof(struct lan743x_adapter),
3359						 LAN743X_USED_TX_CHANNELS,
3360						 LAN743X_USED_RX_CHANNELS);
3361	}
3362
3363	if (!netdev)
3364		goto return_error;
3365
3366	SET_NETDEV_DEV(netdev, &pdev->dev);
3367	pci_set_drvdata(pdev, netdev);
3368	adapter = netdev_priv(netdev);
3369	adapter->netdev = netdev;
3370	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
3371			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
3372			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
3373	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
3374
3375	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);
3376
3377	ret = lan743x_pci_init(adapter, pdev);
3378	if (ret)
3379		goto return_error;
3380
3381	ret = lan743x_csr_init(adapter);
3382	if (ret)
3383		goto cleanup_pci;
3384
3385	ret = lan743x_hardware_init(adapter, pdev);
3386	if (ret)
3387		goto cleanup_pci;
3388
3389	ret = lan743x_mdiobus_init(adapter);
3390	if (ret)
3391		goto cleanup_hardware;
3392
3393	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
3394	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
3395	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
3396				    NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3397	adapter->netdev->hw_features = adapter->netdev->features;
3398
3399	/* carrier off reporting is important to ethtool even BEFORE open */
3400	netif_carrier_off(netdev);
3401
3402	ret = register_netdev(adapter->netdev);
3403	if (ret < 0)
3404		goto cleanup_mdiobus;
3405	return 0;
3406
3407cleanup_mdiobus:
3408	lan743x_mdiobus_cleanup(adapter);
3409
3410cleanup_hardware:
3411	lan743x_hardware_cleanup(adapter);
3412
3413cleanup_pci:
3414	lan743x_pci_cleanup(adapter);
3415
3416return_error:
3417	pr_warn("Initialization failed\n");
3418	return ret;
3419}
3420
3421/**
3422 * lan743x_pcidev_remove - Device Removal Routine
3423 * @pdev: PCI device information struct
3424 *
3425 * this is called by the PCI subsystem to alert the driver
3426 * that it should release a PCI device.  This could be caused by a
3427 * Hot-Plug event, or because the driver is going to be removed from
3428 * memory.
3429 **/
3430static void lan743x_pcidev_remove(struct pci_dev *pdev)
3431{
3432	struct net_device *netdev = pci_get_drvdata(pdev);
3433	struct lan743x_adapter *adapter = netdev_priv(netdev);
3434
3435	lan743x_full_cleanup(adapter);
3436}
3437
3438static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
3439{
3440	struct net_device *netdev = pci_get_drvdata(pdev);
3441	struct lan743x_adapter *adapter = netdev_priv(netdev);
3442
3443	rtnl_lock();
3444	netif_device_detach(netdev);
3445
3446	/* Close the netdev if it is still running.
3447	 * For instance, this is true when the system goes to sleep via
3448	 * pm-suspend, but false when it goes to sleep via the suspend GUI menu.
3449	 */
3450	if (netif_running(netdev))
3451		lan743x_netdev_close(netdev);
3452	rtnl_unlock();
3453
3454#ifdef CONFIG_PM
3455	pci_save_state(pdev);
3456#endif
3457
3458	/* clean up lan743x portion */
3459	lan743x_hardware_cleanup(adapter);
3460}
3461
3462#ifdef CONFIG_PM_SLEEP
3463static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
3464{
3465	return bitrev16(crc16(0xFFFF, buf, len));
3466}
3467
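/* Program the Wake-on-LAN hardware from adapter->wolopts: PHY wake, magic
 * packet and unicast/broadcast wakes map to dedicated WUCSR/PMT_CTL bits,
 * while multicast and ARP wakes use wake-up frame filters keyed by a
 * bit-reversed CRC-16 (see lan743x_pm_wakeframe_crc16()) over the matched
 * header bytes.
 */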
3468static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
3469{
3470	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3471	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3472	const u8 arp_type[2] = { 0x08, 0x06 };
3473	int mask_index;
3474	u32 sopass;
3475	u32 pmtctl;
3476	u32 wucsr;
3477	u32 macrx;
3478	u16 crc;
3479
3480	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
3481		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
3482
3483	/* clear wake settings */
3484	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
3485	pmtctl |= PMT_CTL_WUPS_MASK_;
3486	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
3487		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
3488		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
3489
3490	macrx = lan743x_csr_read(adapter, MAC_RX);
3491
3492	wucsr = 0;
3493	mask_index = 0;
3494
3495	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
3496
3497	if (adapter->wolopts & WAKE_PHY) {
3498		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
3499		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
3500	}
3501	if (adapter->wolopts & WAKE_MAGIC) {
3502		wucsr |= MAC_WUCSR_MPEN_;
3503		macrx |= MAC_RX_RXEN_;
3504		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3505	}
3506	if (adapter->wolopts & WAKE_UCAST) {
3507		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
3508		macrx |= MAC_RX_RXEN_;
3509		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3510		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3511	}
3512	if (adapter->wolopts & WAKE_BCAST) {
3513		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
3514		macrx |= MAC_RX_RXEN_;
3515		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3516		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3517	}
3518	if (adapter->wolopts & WAKE_MCAST) {
3519		/* IPv4 multicast */
3520		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
3521		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3522				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3523				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3524				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3525		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
3526		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3527		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3528		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3529		mask_index++;
3530
3531		/* IPv6 multicast */
3532		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
3533		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3534				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3535				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3536				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3537		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
3538		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3539		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3540		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3541		mask_index++;
3542
3543		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3544		macrx |= MAC_RX_RXEN_;
3545		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3546		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3547	}
3548	if (adapter->wolopts & WAKE_ARP) {
3549		/* set MAC_WUF_CFG & WUF_MASK
3550		 * for packettype (offset 12,13) = ARP (0x0806)
3551		 */
3552		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
3553		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3554				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
3555				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3556				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3557		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
3558		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3559		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3560		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3561		mask_index++;
3562
3563		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3564		macrx |= MAC_RX_RXEN_;
3565		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3566		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3567	}
3568
3569	if (adapter->wolopts & WAKE_MAGICSECURE) {
3570		sopass = *(u32 *)adapter->sopass;
3571		lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
3572		sopass = *(u16 *)&adapter->sopass[4];
3573		lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
3574		wucsr |= MAC_MP_SO_EN_;
3575	}
3576
3577	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
3578	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
3579	lan743x_csr_write(adapter, MAC_RX, macrx);
3580}
3581
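/* Suspend path: quiesce the device via the shutdown handler, clear any
 * stale wake status, program the requested wake sources, protect HW_CFG on
 * PCI11x1x parts, and let the PCI core arm PME and enter D3hot.
 */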
3582static int lan743x_pm_suspend(struct device *dev)
3583{
3584	struct pci_dev *pdev = to_pci_dev(dev);
3585	struct net_device *netdev = pci_get_drvdata(pdev);
3586	struct lan743x_adapter *adapter = netdev_priv(netdev);
3587	u32 data;
3588
3589	lan743x_pcidev_shutdown(pdev);
3590
3591	/* clear all wakes */
3592	lan743x_csr_write(adapter, MAC_WUCSR, 0);
3593	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
3594	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
3595
3596	if (adapter->wolopts)
3597		lan743x_pm_set_wol(adapter);
3598
3599	if (adapter->is_pci11x1x) {
3600		/* Save HW_CFG so it can be restored during PM resume */
3601		data = lan743x_csr_read(adapter, HW_CFG);
3602		adapter->hw_cfg = data;
3603		data |= (HW_CFG_RST_PROTECT_PCIE_ |
3604			 HW_CFG_D3_RESET_DIS_ |
3605			 HW_CFG_D3_VAUX_OVR_ |
3606			 HW_CFG_HOT_RESET_DIS_ |
3607			 HW_CFG_RST_PROTECT_);
3608		lan743x_csr_write(adapter, HW_CFG, data);
3609	}
3610
3611	/* Host sets PME_En, puts the device into D3hot */
3612	return pci_prepare_to_sleep(pdev);
3613}
3614
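/* Resume path: return to D0, restore PCI config space, re-run the hardware
 * init, restore HW_CFG on PCI11x1x, re-open the netdev if it was running
 * before suspend, and report the wake source.
 */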
3615static int lan743x_pm_resume(struct device *dev)
3616{
3617	struct pci_dev *pdev = to_pci_dev(dev);
3618	struct net_device *netdev = pci_get_drvdata(pdev);
3619	struct lan743x_adapter *adapter = netdev_priv(netdev);
3620	int ret;
3621
3622	pci_set_power_state(pdev, PCI_D0);
3623	pci_restore_state(pdev);
3624	pci_save_state(pdev);
3625
3626	/* Restore HW_CFG that was saved during pm suspend */
3627	if (adapter->is_pci11x1x)
3628		lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);
3629
3630	ret = lan743x_hardware_init(adapter, pdev);
3631	if (ret) {
3632		netif_err(adapter, probe, adapter->netdev,
3633			  "lan743x_hardware_init returned %d\n", ret);
3634		lan743x_pci_cleanup(adapter);
3635		return ret;
3636	}
3637
3638	/* Re-open the netdev if it was running before suspend.
3639	 * For instance, this is true when the system wakes up after pm-suspend,
3640	 * but false when it wakes up after suspending via the GUI menu.
3641	 */
3642	if (netif_running(netdev))
3643		lan743x_netdev_open(netdev);
3644
3645	netif_device_attach(netdev);
3646	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
3647	netif_info(adapter, drv, adapter->netdev,
3648		   "Wakeup source : 0x%08X\n", ret);
3649
3650	return 0;
3651}
3652
3653static const struct dev_pm_ops lan743x_pm_ops = {
3654	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3655};
3656#endif /* CONFIG_PM_SLEEP */
3657
3658static const struct pci_device_id lan743x_pcidev_tbl[] = {
3659	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3660	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3661	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
3662	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
3663	{ 0, }
3664};
3665
3666MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
3667
3668static struct pci_driver lan743x_pcidev_driver = {
3669	.name     = DRIVER_NAME,
3670	.id_table = lan743x_pcidev_tbl,
3671	.probe    = lan743x_pcidev_probe,
3672	.remove   = lan743x_pcidev_remove,
3673#ifdef CONFIG_PM_SLEEP
3674	.driver.pm = &lan743x_pm_ops,
3675#endif
3676	.shutdown = lan743x_pcidev_shutdown,
3677};
3678
3679module_pci_driver(lan743x_pcidev_driver);
3680
3681MODULE_AUTHOR(DRIVER_AUTHOR);
3682MODULE_DESCRIPTION(DRIVER_DESC);
3683MODULE_LICENSE("GPL");