   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/* Copyright (C) 2018 Microchip Technology Inc. */
   3
   4#include <linux/module.h>
   5#include <linux/pci.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/crc32.h>
   9#include <linux/microchipphy.h>
  10#include <linux/net_tstamp.h>
  11#include <linux/of_mdio.h>
  12#include <linux/of_net.h>
  13#include <linux/phy.h>
  14#include <linux/phy_fixed.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/iopoll.h>
  17#include <linux/crc16.h>
  18#include "lan743x_main.h"
  19#include "lan743x_ethtool.h"
  20
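/* Release the memory BARs claimed in lan743x_pci_init() and disable
 * the PCI device.
 */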
  21static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  22{
  23	pci_release_selected_regions(adapter->pdev,
  24				     pci_select_bars(adapter->pdev,
  25						     IORESOURCE_MEM));
  26	pci_disable_device(adapter->pdev);
  27}
  28
  29static int lan743x_pci_init(struct lan743x_adapter *adapter,
  30			    struct pci_dev *pdev)
  31{
  32	unsigned long bars = 0;
  33	int ret;
  34
  35	adapter->pdev = pdev;
  36	ret = pci_enable_device_mem(pdev);
  37	if (ret)
  38		goto return_error;
  39
  40	netif_info(adapter, probe, adapter->netdev,
  41		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
  42		   pdev->vendor, pdev->device);
  43	bars = pci_select_bars(pdev, IORESOURCE_MEM);
  44	if (!test_bit(0, &bars))
  45		goto disable_device;
  46
  47	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
  48	if (ret)
  49		goto disable_device;
  50
  51	pci_set_master(pdev);
  52	return 0;
  53
  54disable_device:
  55	pci_disable_device(adapter->pdev);
  56
  57return_error:
  58	return ret;
  59}
  60
  61u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
  62{
  63	return ioread32(&adapter->csr.csr_address[offset]);
  64}
  65
  66void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
  67		       u32 data)
  68{
  69	iowrite32(data, &adapter->csr.csr_address[offset]);
  70}
  71
  72#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
  73
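/* Request a "light" reset by setting HW_CFG_LRST_, then poll HW_CFG
 * until the hardware clears the bit or the poll times out.
 */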
  74static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
  75{
  76	u32 data;
  77
  78	data = lan743x_csr_read(adapter, HW_CFG);
  79	data |= HW_CFG_LRST_;
  80	lan743x_csr_write(adapter, HW_CFG, data);
  81
  82	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
  83				  !(data & HW_CFG_LRST_), 100000, 10000000);
  84}
  85
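/* Poll the CSR at @offset until the bits in @bit_mask match
 * @target_value; the timeout is roughly usleep_min * count microseconds.
 */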
  86static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
  87				    int offset, u32 bit_mask,
  88				    int target_value, int usleep_min,
  89				    int usleep_max, int count)
  90{
  91	u32 data;
  92
  93	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
  94				  target_value == ((data & bit_mask) ? 1 : 0),
  95				  usleep_max, usleep_min * count);
  96}
  97
  98static int lan743x_csr_init(struct lan743x_adapter *adapter)
  99{
 100	struct lan743x_csr *csr = &adapter->csr;
 101	resource_size_t bar_start, bar_length;
 102	int result;
 103
 104	bar_start = pci_resource_start(adapter->pdev, 0);
 105	bar_length = pci_resource_len(adapter->pdev, 0);
 106	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
 107					bar_start, bar_length);
 108	if (!csr->csr_address) {
 109		result = -ENOMEM;
 110		goto clean_up;
 111	}
 112
 113	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
 114	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
 115	netif_info(adapter, probe, adapter->netdev,
 116		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
 117		   csr->id_rev,	FPGA_REV_GET_MAJOR_(csr->fpga_rev),
 118		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
 119	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
 120		result = -ENODEV;
 121		goto clean_up;
 122	}
 123
 124	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 125	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
 126	case ID_REV_CHIP_REV_A0_:
 127		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
 128		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 129		break;
 130	case ID_REV_CHIP_REV_B0_:
 131		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
 132		break;
 133	}
 134
 135	result = lan743x_csr_light_reset(adapter);
 136	if (result)
 137		goto clean_up;
 138	return 0;
 139clean_up:
 140	return result;
 141}
 142
 143static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
 144{
 145	struct lan743x_intr *intr = &adapter->intr;
 146
 147	/* disable the interrupt to prevent repeated re-triggering */
 148	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 149	intr->software_isr_flag = true;
 150	wake_up(&intr->software_isr_wq);
 151}
 152
 153static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
 154{
 155	struct lan743x_tx *tx = context;
 156	struct lan743x_adapter *adapter = tx->adapter;
 157	bool enable_flag = true;
 158
 159	lan743x_csr_read(adapter, INT_EN_SET);
 160	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 161		lan743x_csr_write(adapter, INT_EN_CLR,
 162				  INT_BIT_DMA_TX_(tx->channel_number));
 163	}
 164
 165	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
 166		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
 167		u32 dmac_int_sts;
 168		u32 dmac_int_en;
 169
 170		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 171			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 172		else
 173			dmac_int_sts = ioc_bit;
 174		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 175			dmac_int_en = lan743x_csr_read(adapter,
 176						       DMAC_INT_EN_SET);
 177		else
 178			dmac_int_en = ioc_bit;
 179
 180		dmac_int_en &= ioc_bit;
 181		dmac_int_sts &= dmac_int_en;
 182		if (dmac_int_sts & ioc_bit) {
 183			napi_schedule(&tx->napi);
 184			enable_flag = false;/* poll func will enable later */
 185		}
 186	}
 187
 188	if (enable_flag)
 189		/* enable isr */
 190		lan743x_csr_write(adapter, INT_EN_SET,
 191				  INT_BIT_DMA_TX_(tx->channel_number));
 192}
 193
 194static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
 195{
 196	struct lan743x_rx *rx = context;
 197	struct lan743x_adapter *adapter = rx->adapter;
 198	bool enable_flag = true;
 199
 200	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 201		lan743x_csr_write(adapter, INT_EN_CLR,
 202				  INT_BIT_DMA_RX_(rx->channel_number));
 203	}
 204
 205	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
 206		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
 207		u32 dmac_int_sts;
 208		u32 dmac_int_en;
 209
 210		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 211			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 212		else
 213			dmac_int_sts = rx_frame_bit;
 214		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 215			dmac_int_en = lan743x_csr_read(adapter,
 216						       DMAC_INT_EN_SET);
 217		else
 218			dmac_int_en = rx_frame_bit;
 219
 220		dmac_int_en &= rx_frame_bit;
 221		dmac_int_sts &= dmac_int_en;
 222		if (dmac_int_sts & rx_frame_bit) {
 223			napi_schedule(&rx->napi);
  224			enable_flag = false; /* poll func will enable later */
 225		}
 226	}
 227
 228	if (enable_flag) {
 229		/* enable isr */
 230		lan743x_csr_write(adapter, INT_EN_SET,
 231				  INT_BIT_DMA_RX_(rx->channel_number));
 232	}
 233}
 234
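/* Shared/legacy interrupt handler: dispatch the RX and TX DMA status
 * bits to the per-channel ISRs, service the software test and 1588 (PTP)
 * interrupts, and mask off any remaining unexpected sources.
 */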
 235static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
 236{
 237	struct lan743x_adapter *adapter = context;
 238	unsigned int channel;
 239
 240	if (int_sts & INT_BIT_ALL_RX_) {
 241		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
 242			channel++) {
 243			u32 int_bit = INT_BIT_DMA_RX_(channel);
 244
 245			if (int_sts & int_bit) {
 246				lan743x_rx_isr(&adapter->rx[channel],
 247					       int_bit, flags);
 248				int_sts &= ~int_bit;
 249			}
 250		}
 251	}
 252	if (int_sts & INT_BIT_ALL_TX_) {
 253		for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
 254			channel++) {
 255			u32 int_bit = INT_BIT_DMA_TX_(channel);
 256
 257			if (int_sts & int_bit) {
 258				lan743x_tx_isr(&adapter->tx[channel],
 259					       int_bit, flags);
 260				int_sts &= ~int_bit;
 261			}
 262		}
 263	}
 264	if (int_sts & INT_BIT_ALL_OTHER_) {
 265		if (int_sts & INT_BIT_SW_GP_) {
 266			lan743x_intr_software_isr(adapter);
 267			int_sts &= ~INT_BIT_SW_GP_;
 268		}
 269		if (int_sts & INT_BIT_1588_) {
 270			lan743x_ptp_isr(adapter);
 271			int_sts &= ~INT_BIT_1588_;
 272		}
 273	}
 274	if (int_sts)
 275		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
 276}
 277
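/* Entry point for every IRQ vector.  Read (or infer) the interrupt
 * status, optionally mask the vector and master interrupts, invoke the
 * registered handler, then re-enable interrupts per the vector flags.
 */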
 278static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
 279{
 280	struct lan743x_vector *vector = ptr;
 281	struct lan743x_adapter *adapter = vector->adapter;
 282	irqreturn_t result = IRQ_NONE;
 283	u32 int_enables;
 284	u32 int_sts;
 285
 286	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
 287		int_sts = lan743x_csr_read(adapter, INT_STS);
 288	} else if (vector->flags &
 289		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
 290		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
 291		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
 292	} else {
 293		/* use mask as implied status */
 294		int_sts = vector->int_mask | INT_BIT_MAS_;
 295	}
 296
 297	if (!(int_sts & INT_BIT_MAS_))
 298		goto irq_done;
 299
 300	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
 301		/* disable vector interrupt */
 302		lan743x_csr_write(adapter,
 303				  INT_VEC_EN_CLR,
 304				  INT_VEC_EN_(vector->vector_index));
 305
 306	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
 307		/* disable master interrupt */
 308		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 309
 310	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
 311		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
 312	} else {
 313		/*  use vector mask as implied enable mask */
 314		int_enables = vector->int_mask;
 315	}
 316
 317	int_sts &= int_enables;
 318	int_sts &= vector->int_mask;
 319	if (int_sts) {
 320		if (vector->handler) {
 321			vector->handler(vector->context,
 322					int_sts, vector->flags);
 323		} else {
 324			/* disable interrupts on this vector */
 325			lan743x_csr_write(adapter, INT_EN_CLR,
 326					  vector->int_mask);
 327		}
 328		result = IRQ_HANDLED;
 329	}
 330
 331	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
 332		/* enable master interrupt */
 333		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 334
 335	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
 336		/* enable vector interrupt */
 337		lan743x_csr_write(adapter,
 338				  INT_VEC_EN_SET,
 339				  INT_VEC_EN_(vector->vector_index));
 340irq_done:
 341	return result;
 342}
 343
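/* Fire the software general purpose interrupt and wait up to 200 ms for
 * the ISR to observe it; used to verify interrupt delivery at open time.
 */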
 344static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
 345{
 346	struct lan743x_intr *intr = &adapter->intr;
 347	int ret;
 348
 349	intr->software_isr_flag = false;
 350
 351	/* enable and activate test interrupt */
 352	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
 353	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
 354
 355	ret = wait_event_timeout(intr->software_isr_wq,
 356				 intr->software_isr_flag,
 357				 msecs_to_jiffies(200));
 358
 359	/* disable test interrupt */
 360	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 361
 362	return ret > 0 ? 0 : -ENODEV;
 363}
 364
 365static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
 366				     int vector_index, u32 flags,
 367				     u32 int_mask,
 368				     lan743x_vector_handler handler,
 369				     void *context)
 370{
 371	struct lan743x_vector *vector = &adapter->intr.vector_list
 372					[vector_index];
 373	int ret;
 374
 375	vector->adapter = adapter;
 376	vector->flags = flags;
 377	vector->vector_index = vector_index;
 378	vector->int_mask = int_mask;
 379	vector->handler = handler;
 380	vector->context = context;
 381
 382	ret = request_irq(vector->irq,
 383			  lan743x_intr_entry_isr,
 384			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
 385			  IRQF_SHARED : 0, DRIVER_NAME, vector);
 386	if (ret) {
 387		vector->handler = NULL;
 388		vector->context = NULL;
 389		vector->int_mask = 0;
 390		vector->flags = 0;
 391	}
 392	return ret;
 393}
 394
 395static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
 396					int vector_index)
 397{
 398	struct lan743x_vector *vector = &adapter->intr.vector_list
 399					[vector_index];
 400
 401	free_irq(vector->irq, vector);
 402	vector->handler = NULL;
 403	vector->context = NULL;
 404	vector->int_mask = 0;
 405	vector->flags = 0;
 406}
 407
 408static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
 409					 u32 int_mask)
 410{
 411	int index;
 412
 413	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
 414		if (adapter->intr.vector_list[index].int_mask & int_mask)
 415			return adapter->intr.vector_list[index].flags;
 416	}
 417	return 0;
 418}
 419
 420static void lan743x_intr_close(struct lan743x_adapter *adapter)
 421{
 422	struct lan743x_intr *intr = &adapter->intr;
 423	int index = 0;
 424
 425	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 426	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
 427
 428	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
 429		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
 430			lan743x_intr_unregister_isr(adapter, index);
 431			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
 432		}
 433	}
 434
 435	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
 436		pci_disable_msi(adapter->pdev);
 437		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
 438	}
 439
 440	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
 441		pci_disable_msix(adapter->pdev);
 442		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
 443	}
 444}
 445
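/* Set up interrupts: try MSI-X first, then MSI, then a legacy shared
 * IRQ.  Vector 0 services all sources; when enough MSI-X vectors are
 * available, each TX and RX channel is moved to its own vector.
 */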
 446static int lan743x_intr_open(struct lan743x_adapter *adapter)
 447{
 448	struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
 449	struct lan743x_intr *intr = &adapter->intr;
 450	u32 int_vec_en_auto_clr = 0;
 451	u32 int_vec_map0 = 0;
 452	u32 int_vec_map1 = 0;
 453	int ret = -ENODEV;
 454	int index = 0;
 455	u32 flags = 0;
 456
 457	intr->number_of_vectors = 0;
 458
 459	/* Try to set up MSIX interrupts */
 460	memset(&msix_entries[0], 0,
 461	       sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
 462	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
 463		msix_entries[index].entry = index;
 464	ret = pci_enable_msix_range(adapter->pdev,
 465				    msix_entries, 1,
 466				    1 + LAN743X_USED_TX_CHANNELS +
 467				    LAN743X_USED_RX_CHANNELS);
 468
 469	if (ret > 0) {
 470		intr->flags |= INTR_FLAG_MSIX_ENABLED;
 471		intr->number_of_vectors = ret;
 472		intr->using_vectors = true;
 473		for (index = 0; index < intr->number_of_vectors; index++)
 474			intr->vector_list[index].irq = msix_entries
 475						       [index].vector;
 476		netif_info(adapter, ifup, adapter->netdev,
 477			   "using MSIX interrupts, number of vectors = %d\n",
 478			   intr->number_of_vectors);
 479	}
 480
  481	/* If MSI-X failed, try to set up MSI interrupts */
 482	if (!intr->number_of_vectors) {
 483		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 484			if (!pci_enable_msi(adapter->pdev)) {
 485				intr->flags |= INTR_FLAG_MSI_ENABLED;
 486				intr->number_of_vectors = 1;
 487				intr->using_vectors = true;
 488				intr->vector_list[0].irq =
 489					adapter->pdev->irq;
 490				netif_info(adapter, ifup, adapter->netdev,
 491					   "using MSI interrupts, number of vectors = %d\n",
 492					   intr->number_of_vectors);
 493			}
 494		}
 495	}
 496
  497	/* If both MSI-X and MSI failed, fall back to a legacy interrupt */
 498	if (!intr->number_of_vectors) {
 499		intr->number_of_vectors = 1;
 500		intr->using_vectors = false;
 501		intr->vector_list[0].irq = intr->irq;
 502		netif_info(adapter, ifup, adapter->netdev,
 503			   "using legacy interrupts\n");
 504	}
 505
 506	/* At this point we must have at least one irq */
 507	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
 508
 509	/* map all interrupts to vector 0 */
 510	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
 511	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
 512	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
 513	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 514		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 515		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 516		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 517
 518	if (intr->using_vectors) {
 519		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 520			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 521	} else {
 522		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
 523			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
 524			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
 525	}
 526
 527	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 528		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
 529		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
 530		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 531		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
 532		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
 533		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
 534	}
 535
 536	init_waitqueue_head(&intr->software_isr_wq);
 537
 538	ret = lan743x_intr_register_isr(adapter, 0, flags,
 539					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
 540					INT_BIT_ALL_OTHER_,
 541					lan743x_intr_shared_isr, adapter);
 542	if (ret)
 543		goto clean_up;
 544	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
 545
 546	if (intr->using_vectors)
 547		lan743x_csr_write(adapter, INT_VEC_EN_SET,
 548				  INT_VEC_EN_(0));
 549
 550	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 551		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
 552		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
 553		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
 554		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
 555		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
 556		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
 557		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
 558		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
 559		lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
 560		lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
 561		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
 562	}
 563
 564	/* enable interrupts */
 565	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 566	ret = lan743x_intr_test_isr(adapter);
 567	if (ret)
 568		goto clean_up;
 569
 570	if (intr->number_of_vectors > 1) {
 571		int number_of_tx_vectors = intr->number_of_vectors - 1;
 572
 573		if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
 574			number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
 575		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 576			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 577			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 578			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 579			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 580			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 581
 582		if (adapter->csr.flags &
 583		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 584			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 585				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 586				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 587				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 588		}
 589
 590		for (index = 0; index < number_of_tx_vectors; index++) {
 591			u32 int_bit = INT_BIT_DMA_TX_(index);
 592			int vector = index + 1;
 593
 594			/* map TX interrupt to vector */
 595			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 596			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
 597
 598			/* Remove TX interrupt from shared mask */
 599			intr->vector_list[0].int_mask &= ~int_bit;
 600			ret = lan743x_intr_register_isr(adapter, vector, flags,
 601							int_bit, lan743x_tx_isr,
 602							&adapter->tx[index]);
 603			if (ret)
 604				goto clean_up;
 605			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 606			if (!(flags &
 607			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
 608				lan743x_csr_write(adapter, INT_VEC_EN_SET,
 609						  INT_VEC_EN_(vector));
 610		}
 611	}
 612	if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
 613		int number_of_rx_vectors = intr->number_of_vectors -
 614					   LAN743X_USED_TX_CHANNELS - 1;
 615
 616		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
 617			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
 618
 619		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 620			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 621			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 622			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 623			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 624			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 625
 626		if (adapter->csr.flags &
 627		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 628			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
 629				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 630				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 631				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 632				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 633		}
 634		for (index = 0; index < number_of_rx_vectors; index++) {
 635			int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
 636			u32 int_bit = INT_BIT_DMA_RX_(index);
 637
 638			/* map RX interrupt to vector */
 639			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
 640			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
 641			if (flags &
 642			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
 643				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
 644				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
 645						  int_vec_en_auto_clr);
 646			}
 647
 648			/* Remove RX interrupt from shared mask */
 649			intr->vector_list[0].int_mask &= ~int_bit;
 650			ret = lan743x_intr_register_isr(adapter, vector, flags,
 651							int_bit, lan743x_rx_isr,
 652							&adapter->rx[index]);
 653			if (ret)
 654				goto clean_up;
 655			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 656
 657			lan743x_csr_write(adapter, INT_VEC_EN_SET,
 658					  INT_VEC_EN_(vector));
 659		}
 660	}
 661	return 0;
 662
 663clean_up:
 664	lan743x_intr_close(adapter);
 665	return ret;
 666}
 667
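/* Write @length words from @buf into the selected data-port RAM,
 * waiting for DP_SEL_DPRDY_ before the transfer and after each word.
 */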
 668static int lan743x_dp_write(struct lan743x_adapter *adapter,
 669			    u32 select, u32 addr, u32 length, u32 *buf)
 670{
 671	u32 dp_sel;
 672	int i;
 673
 674	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
 675				     1, 40, 100, 100))
 676		return -EIO;
 677	dp_sel = lan743x_csr_read(adapter, DP_SEL);
 678	dp_sel &= ~DP_SEL_MASK_;
 679	dp_sel |= select;
 680	lan743x_csr_write(adapter, DP_SEL, dp_sel);
 681
 682	for (i = 0; i < length; i++) {
 683		lan743x_csr_write(adapter, DP_ADDR, addr + i);
 684		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
 685		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
 686		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
 687					     1, 40, 100, 100))
 688			return -EIO;
 689	}
 690
 691	return 0;
 692}
 693
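/* Build a MAC_MII_ACC command word for the given PHY address and
 * register index, with the read/write direction and busy bit set.
 */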
 694static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
 695{
 696	u32 ret;
 697
 698	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 699		MAC_MII_ACC_PHY_ADDR_MASK_;
 700	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
 701		MAC_MII_ACC_MIIRINDA_MASK_;
 702
 703	if (read)
 704		ret |= MAC_MII_ACC_MII_READ_;
 705	else
 706		ret |= MAC_MII_ACC_MII_WRITE_;
 707	ret |= MAC_MII_ACC_MII_BUSY_;
 708
 709	return ret;
 710}
 711
 712static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
 713{
 714	u32 data;
 715
 716	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
 717				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
 718}
 719
 720static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
 721{
 722	struct lan743x_adapter *adapter = bus->priv;
 723	u32 val, mii_access;
 724	int ret;
 725
  726	/* confirm MII is not busy */
 727	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 728	if (ret < 0)
 729		return ret;
 730
 731	/* set the address, index & direction (read from PHY) */
 732	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
 733	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 734	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 735	if (ret < 0)
 736		return ret;
 737
 738	val = lan743x_csr_read(adapter, MAC_MII_DATA);
 739	return (int)(val & 0xFFFF);
 740}
 741
 742static int lan743x_mdiobus_write(struct mii_bus *bus,
 743				 int phy_id, int index, u16 regval)
 744{
 745	struct lan743x_adapter *adapter = bus->priv;
 746	u32 val, mii_access;
 747	int ret;
 748
 749	/* confirm MII not busy */
 750	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 751	if (ret < 0)
 752		return ret;
 753	val = (u32)regval;
 754	lan743x_csr_write(adapter, MAC_MII_DATA, val);
 755
 756	/* set the address, index & direction (write to PHY) */
 757	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
 758	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 759	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 760	return ret;
 761}
 762
 763static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
 764				    u8 *addr)
 765{
 766	u32 addr_lo, addr_hi;
 767
 768	addr_lo = addr[0] |
 769		addr[1] << 8 |
 770		addr[2] << 16 |
 771		addr[3] << 24;
 772	addr_hi = addr[4] |
 773		addr[5] << 8;
 774	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
 775	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
 776
 777	ether_addr_copy(adapter->mac_address, addr);
 778	netif_info(adapter, drv, adapter->netdev,
 779		   "MAC address set to %pM\n", addr);
 780}
 781
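/* Reset the MAC counters, disable automatic duplex/speed detection
 * (phylib handles that) and pick a MAC address: keep a valid stored
 * address, otherwise read the one latched in MAC_RX_ADDRH/L, otherwise
 * fall back to a random address.
 */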
 782static int lan743x_mac_init(struct lan743x_adapter *adapter)
 783{
 784	bool mac_address_valid = true;
 785	struct net_device *netdev;
 786	u32 mac_addr_hi = 0;
 787	u32 mac_addr_lo = 0;
 788	u32 data;
 789
 790	netdev = adapter->netdev;
 791
  792	/* disable automatic duplex and speed detection; phylib handles that */
 793	data = lan743x_csr_read(adapter, MAC_CR);
 794	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
 795	data |= MAC_CR_CNTR_RST_;
 796	lan743x_csr_write(adapter, MAC_CR, data);
 797
 798	if (!is_valid_ether_addr(adapter->mac_address)) {
 799		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
 800		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
 801		adapter->mac_address[0] = mac_addr_lo & 0xFF;
 802		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
 803		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
 804		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
 805		adapter->mac_address[4] = mac_addr_hi & 0xFF;
 806		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
 807
 808		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
 809		    mac_addr_lo == 0xFFFFFFFF) {
 810			mac_address_valid = false;
 811		} else if (!is_valid_ether_addr(adapter->mac_address)) {
 812			mac_address_valid = false;
 813		}
 814
 815		if (!mac_address_valid)
 816			eth_random_addr(adapter->mac_address);
 817	}
 818	lan743x_mac_set_address(adapter, adapter->mac_address);
 819	ether_addr_copy(netdev->dev_addr, adapter->mac_address);
 820
 821	return 0;
 822}
 823
 824static int lan743x_mac_open(struct lan743x_adapter *adapter)
 825{
 826	u32 temp;
 827
 828	temp = lan743x_csr_read(adapter, MAC_RX);
 829	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
 830	temp = lan743x_csr_read(adapter, MAC_TX);
 831	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
 832	return 0;
 833}
 834
 835static void lan743x_mac_close(struct lan743x_adapter *adapter)
 836{
 837	u32 temp;
 838
 839	temp = lan743x_csr_read(adapter, MAC_TX);
 840	temp &= ~MAC_TX_TXEN_;
 841	lan743x_csr_write(adapter, MAC_TX, temp);
 842	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
 843				 1, 1000, 20000, 100);
 844
 845	temp = lan743x_csr_read(adapter, MAC_RX);
 846	temp &= ~MAC_RX_RXEN_;
 847	lan743x_csr_write(adapter, MAC_RX, temp);
 848	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
 849				 1, 1000, 20000, 100);
 850}
 851
 852static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
 853					      bool tx_enable, bool rx_enable)
 854{
 855	u32 flow_setting = 0;
 856
  857	/* set the maximum pause time; when FIFO space frees up,
  858	 * a zero-value pause frame is sent to release the pause
  859	 */
 860	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
 861	if (tx_enable)
 862		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
 863	if (rx_enable)
 864		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
 865	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
 866}
 867
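/* Program the maximum RX frame size for @new_mtu (MTU plus Ethernet
 * header and FCS), temporarily disabling the receiver while the size
 * field is updated and re-enabling it afterwards if it was running.
 */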
 868static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
 869{
 870	int enabled = 0;
 871	u32 mac_rx = 0;
 872
 873	mac_rx = lan743x_csr_read(adapter, MAC_RX);
 874	if (mac_rx & MAC_RX_RXEN_) {
 875		enabled = 1;
 876		if (mac_rx & MAC_RX_RXD_) {
 877			lan743x_csr_write(adapter, MAC_RX, mac_rx);
 878			mac_rx &= ~MAC_RX_RXD_;
 879		}
 880		mac_rx &= ~MAC_RX_RXEN_;
 881		lan743x_csr_write(adapter, MAC_RX, mac_rx);
 882		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
 883					 1, 1000, 20000, 100);
 884		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
 885	}
 886
 887	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
 888	mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
 889		  << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
 890	lan743x_csr_write(adapter, MAC_RX, mac_rx);
 891
 892	if (enabled) {
 893		mac_rx |= MAC_RX_RXEN_;
 894		lan743x_csr_write(adapter, MAC_RX, mac_rx);
 895	}
 896	return 0;
 897}
 898
 899/* PHY */
 900static int lan743x_phy_reset(struct lan743x_adapter *adapter)
 901{
 902	u32 data;
 903
  904	/* Only called from within probe, and before mdiobus_register */
 905
 906	data = lan743x_csr_read(adapter, PMT_CTL);
 907	data |= PMT_CTL_ETH_PHY_RST_;
 908	lan743x_csr_write(adapter, PMT_CTL, data);
 909
 910	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
 911				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
 912				  (data & PMT_CTL_READY_)),
 913				  50000, 1000000);
 914}
 915
 916static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
 917					   u8 duplex, u16 local_adv,
 918					   u16 remote_adv)
 919{
 920	struct lan743x_phy *phy = &adapter->phy;
 921	u8 cap;
 922
 923	if (phy->fc_autoneg)
 924		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
 925	else
 926		cap = phy->fc_request_control;
 927
 928	lan743x_mac_flow_ctrl_set_enables(adapter,
 929					  cap & FLOW_CTRL_TX,
 930					  cap & FLOW_CTRL_RX);
 931}
 932
 933static int lan743x_phy_init(struct lan743x_adapter *adapter)
 934{
 935	return lan743x_phy_reset(adapter);
 936}
 937
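/* phylib link-change callback: mirror the negotiated interface mode,
 * duplex and speed into MAC_CR, then update flow control settings and
 * the PTP latency values.
 */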
 938static void lan743x_phy_link_status_change(struct net_device *netdev)
 939{
 940	struct lan743x_adapter *adapter = netdev_priv(netdev);
 941	struct phy_device *phydev = netdev->phydev;
 942	u32 data;
 943
 944	phy_print_status(phydev);
 945	if (phydev->state == PHY_RUNNING) {
 946		struct ethtool_link_ksettings ksettings;
 947		int remote_advertisement = 0;
 948		int local_advertisement = 0;
 949
 950		data = lan743x_csr_read(adapter, MAC_CR);
 951
 952		/* set interface mode */
 953		if (phy_interface_is_rgmii(phydev))
 954			/* RGMII */
 955			data &= ~MAC_CR_MII_EN_;
 956		else
 957			/* GMII */
 958			data |= MAC_CR_MII_EN_;
 959
 960		/* set duplex mode */
 961		if (phydev->duplex)
 962			data |= MAC_CR_DPX_;
 963		else
 964			data &= ~MAC_CR_DPX_;
 965
 966		/* set bus speed */
 967		switch (phydev->speed) {
 968		case SPEED_10:
 969			data &= ~MAC_CR_CFG_H_;
 970			data &= ~MAC_CR_CFG_L_;
 971		break;
 972		case SPEED_100:
 973			data &= ~MAC_CR_CFG_H_;
 974			data |= MAC_CR_CFG_L_;
 975		break;
 976		case SPEED_1000:
 977			data |= MAC_CR_CFG_H_;
 978			data &= ~MAC_CR_CFG_L_;
 979		break;
 980		}
 981		lan743x_csr_write(adapter, MAC_CR, data);
 982
 983		memset(&ksettings, 0, sizeof(ksettings));
 984		phy_ethtool_get_link_ksettings(netdev, &ksettings);
 985		local_advertisement =
 986			linkmode_adv_to_mii_adv_t(phydev->advertising);
 987		remote_advertisement =
 988			linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
 989
 990		lan743x_phy_update_flowcontrol(adapter,
 991					       ksettings.base.duplex,
 992					       local_advertisement,
 993					       remote_advertisement);
 994		lan743x_ptp_update_latency(adapter, ksettings.base.speed);
 995	}
 996}
 997
 998static void lan743x_phy_close(struct lan743x_adapter *adapter)
 999{
1000	struct net_device *netdev = adapter->netdev;
1001
1002	phy_stop(netdev->phydev);
1003	phy_disconnect(netdev->phydev);
1004	netdev->phydev = NULL;
1005}
1006
1007static int lan743x_phy_open(struct lan743x_adapter *adapter)
1008{
1009	struct net_device *netdev = adapter->netdev;
1010	struct lan743x_phy *phy = &adapter->phy;
1011	struct phy_device *phydev;
1012	int ret = -EIO;
1013
1014	/* try devicetree phy, or fixed link */
1015	phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
1016					lan743x_phy_link_status_change);
1017
1018	if (!phydev) {
1019		/* try internal phy */
1020		phydev = phy_find_first(adapter->mdiobus);
1021		if (!phydev)
1022			goto return_error;
1023
1024		ret = phy_connect_direct(netdev, phydev,
1025					 lan743x_phy_link_status_change,
1026					 PHY_INTERFACE_MODE_GMII);
1027		if (ret)
1028			goto return_error;
1029	}
1030
1031	/* MAC doesn't support 1000T Half */
1032	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1033
1034	/* support both flow controls */
1035	phy_support_asym_pause(phydev);
1036	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1037	phy->fc_autoneg = phydev->autoneg;
1038
1039	phy_start(phydev);
1040	phy_start_aneg(phydev);
1041	phy_attached_info(phydev);
1042	return 0;
1043
1044return_error:
1045	return ret;
1046}
1047
1048static void lan743x_rfe_open(struct lan743x_adapter *adapter)
1049{
1050	lan743x_csr_write(adapter, RFE_RSS_CFG,
1051		RFE_RSS_CFG_UDP_IPV6_EX_ |
1052		RFE_RSS_CFG_TCP_IPV6_EX_ |
1053		RFE_RSS_CFG_IPV6_EX_ |
1054		RFE_RSS_CFG_UDP_IPV6_ |
1055		RFE_RSS_CFG_TCP_IPV6_ |
1056		RFE_RSS_CFG_IPV6_ |
1057		RFE_RSS_CFG_UDP_IPV4_ |
1058		RFE_RSS_CFG_TCP_IPV4_ |
1059		RFE_RSS_CFG_IPV4_ |
1060		RFE_RSS_CFG_VALID_HASH_BITS_ |
1061		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
1062		RFE_RSS_CFG_RSS_HASH_STORE_ |
1063		RFE_RSS_CFG_RSS_ENABLE_);
1064}
1065
1066static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1067{
1068	u8 *mac_addr;
1069	u32 mac_addr_hi = 0;
1070	u32 mac_addr_lo = 0;
1071
 1072	/* Add the MAC address to the perfect filter */
1073	mac_addr = adapter->mac_address;
1074	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1075		      (((u32)(mac_addr[1])) << 8) |
1076		      (((u32)(mac_addr[2])) << 16) |
1077		      (((u32)(mac_addr[3])) << 24));
1078	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1079		      (((u32)(mac_addr[5])) << 8));
1080
1081	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1082	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1083			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1084}
1085
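/* Program the receive filters from the netdev flags and multicast list:
 * the first 32 multicast addresses go into the perfect filter, the rest
 * are hashed into the 512-bit multicast hash table written via the
 * data port.
 */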
1086static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
1087{
1088	struct net_device *netdev = adapter->netdev;
1089	u32 hash_table[DP_SEL_VHF_HASH_LEN];
1090	u32 rfctl;
1091	u32 data;
1092
1093	rfctl = lan743x_csr_read(adapter, RFE_CTL);
1094	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
1095		 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1096	rfctl |= RFE_CTL_AB_;
1097	if (netdev->flags & IFF_PROMISC) {
1098		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
1099	} else {
1100		if (netdev->flags & IFF_ALLMULTI)
1101			rfctl |= RFE_CTL_AM_;
1102	}
1103
1104	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
1105	if (netdev_mc_count(netdev)) {
1106		struct netdev_hw_addr *ha;
1107		int i;
1108
1109		rfctl |= RFE_CTL_DA_PERFECT_;
1110		i = 1;
1111		netdev_for_each_mc_addr(ha, netdev) {
 1112			/* store the first 32 addresses in the perfect filter */
1113			if (i < 33) {
1114				lan743x_csr_write(adapter,
1115						  RFE_ADDR_FILT_HI(i), 0);
1116				data = ha->addr[3];
1117				data = ha->addr[2] | (data << 8);
1118				data = ha->addr[1] | (data << 8);
1119				data = ha->addr[0] | (data << 8);
1120				lan743x_csr_write(adapter,
1121						  RFE_ADDR_FILT_LO(i), data);
1122				data = ha->addr[5];
1123				data = ha->addr[4] | (data << 8);
1124				data |= RFE_ADDR_FILT_HI_VALID_;
1125				lan743x_csr_write(adapter,
1126						  RFE_ADDR_FILT_HI(i), data);
1127			} else {
1128				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
1129					     23) & 0x1FF;
1130				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
1131				rfctl |= RFE_CTL_MCAST_HASH_;
1132			}
1133			i++;
1134		}
1135	}
1136
1137	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
1138			 DP_SEL_VHF_VLAN_LEN,
1139			 DP_SEL_VHF_HASH_LEN, hash_table);
1140	lan743x_csr_write(adapter, RFE_CTL, rfctl);
1141}
1142
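/* Soft-reset the DMA controller, then program descriptor spacing,
 * channel arbitration, maximum read request size and the interrupt
 * coalescing / OBFF thresholds.
 */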
1143static int lan743x_dmac_init(struct lan743x_adapter *adapter)
1144{
1145	u32 data = 0;
1146
1147	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
1148	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
1149				 0, 1000, 20000, 100);
1150	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
1151	case DMA_DESCRIPTOR_SPACING_16:
1152		data = DMAC_CFG_MAX_DSPACE_16_;
1153		break;
1154	case DMA_DESCRIPTOR_SPACING_32:
1155		data = DMAC_CFG_MAX_DSPACE_32_;
1156		break;
1157	case DMA_DESCRIPTOR_SPACING_64:
1158		data = DMAC_CFG_MAX_DSPACE_64_;
1159		break;
1160	case DMA_DESCRIPTOR_SPACING_128:
1161		data = DMAC_CFG_MAX_DSPACE_128_;
1162		break;
1163	default:
1164		return -EPERM;
1165	}
1166	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1167		data |= DMAC_CFG_COAL_EN_;
1168	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
1169	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
1170	lan743x_csr_write(adapter, DMAC_CFG, data);
1171	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
1172	data |= DMAC_COAL_CFG_TIMER_TX_START_;
1173	data |= DMAC_COAL_CFG_FLUSH_INTS_;
1174	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
1175	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
1176	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
1177	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
1178	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
1179	data = DMAC_OBFF_TX_THRES_SET_(0x08);
1180	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
1181	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
1182	return 0;
1183}
1184
1185static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1186				     int tx_channel)
1187{
1188	u32 dmac_cmd = 0;
1189
1190	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1191	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1192				      DMAC_CMD_START_T_(tx_channel)),
1193				      (dmac_cmd &
1194				      DMAC_CMD_STOP_T_(tx_channel)));
1195}
1196
1197static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1198					     int tx_channel)
1199{
1200	int timeout = 100;
1201	int result = 0;
1202
1203	while (timeout &&
1204	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1205	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1206		usleep_range(1000, 20000);
1207		timeout--;
1208	}
1209	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1210		result = -ENODEV;
1211	return result;
1212}
1213
1214static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1215				     int rx_channel)
1216{
1217	u32 dmac_cmd = 0;
1218
1219	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1220	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1221				      DMAC_CMD_START_R_(rx_channel)),
1222				      (dmac_cmd &
1223				      DMAC_CMD_STOP_R_(rx_channel)));
1224}
1225
1226static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1227					     int rx_channel)
1228{
1229	int timeout = 100;
1230	int result = 0;
1231
1232	while (timeout &&
1233	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1234	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1235		usleep_range(1000, 20000);
1236		timeout--;
1237	}
1238	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1239		result = -ENODEV;
1240	return result;
1241}
1242
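/* Release one TX descriptor: unmap its DMA buffer, hand any skb to the
 * PTP code or free it, then clear the descriptor and its buffer_info.
 */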
1243static void lan743x_tx_release_desc(struct lan743x_tx *tx,
1244				    int descriptor_index, bool cleanup)
1245{
1246	struct lan743x_tx_buffer_info *buffer_info = NULL;
1247	struct lan743x_tx_descriptor *descriptor = NULL;
1248	u32 descriptor_type = 0;
1249	bool ignore_sync;
1250
1251	descriptor = &tx->ring_cpu_ptr[descriptor_index];
1252	buffer_info = &tx->buffer_info[descriptor_index];
1253	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
1254		goto done;
1255
1256	descriptor_type = le32_to_cpu(descriptor->data0) &
1257			  TX_DESC_DATA0_DTYPE_MASK_;
1258	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
1259		goto clean_up_data_descriptor;
1260	else
1261		goto clear_active;
1262
1263clean_up_data_descriptor:
1264	if (buffer_info->dma_ptr) {
1265		if (buffer_info->flags &
1266		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
1267			dma_unmap_page(&tx->adapter->pdev->dev,
1268				       buffer_info->dma_ptr,
1269				       buffer_info->buffer_length,
1270				       DMA_TO_DEVICE);
1271		} else {
1272			dma_unmap_single(&tx->adapter->pdev->dev,
1273					 buffer_info->dma_ptr,
1274					 buffer_info->buffer_length,
1275					 DMA_TO_DEVICE);
1276		}
1277		buffer_info->dma_ptr = 0;
1278		buffer_info->buffer_length = 0;
1279	}
1280	if (!buffer_info->skb)
1281		goto clear_active;
1282
1283	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
1284		dev_kfree_skb_any(buffer_info->skb);
1285		goto clear_skb;
1286	}
1287
1288	if (cleanup) {
1289		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
1290		dev_kfree_skb_any(buffer_info->skb);
1291	} else {
1292		ignore_sync = (buffer_info->flags &
1293			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
1294		lan743x_ptp_tx_timestamp_skb(tx->adapter,
1295					     buffer_info->skb, ignore_sync);
1296	}
1297
1298clear_skb:
1299	buffer_info->skb = NULL;
1300
1301clear_active:
1302	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
1303
1304done:
1305	memset(buffer_info, 0, sizeof(*buffer_info));
1306	memset(descriptor, 0, sizeof(*descriptor));
1307}
1308
1309static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1310{
1311	return ((++index) % tx->ring_size);
1312}
1313
1314static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1315{
1316	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
1317		lan743x_tx_release_desc(tx, tx->last_head, false);
1318		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1319	}
1320}
1321
1322static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1323{
1324	u32 original_head = 0;
1325
1326	original_head = tx->last_head;
1327	do {
1328		lan743x_tx_release_desc(tx, tx->last_head, true);
1329		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1330	} while (tx->last_head != original_head);
1331	memset(tx->ring_cpu_ptr, 0,
1332	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1333	memset(tx->buffer_info, 0,
1334	       sizeof(*tx->buffer_info) * (tx->ring_size));
1335}
1336
1337static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1338				   struct sk_buff *skb)
1339{
1340	int result = 1; /* 1 for the main skb buffer */
1341	int nr_frags = 0;
1342
1343	if (skb_is_gso(skb))
1344		result++; /* requires an extension descriptor */
1345	nr_frags = skb_shinfo(skb)->nr_frags;
1346	result += nr_frags; /* 1 for each fragment buffer */
1347	return result;
1348}
1349
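/* Number of free descriptors in the TX ring; one slot is always kept
 * empty so a full ring can be distinguished from an empty one.
 */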
1350static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1351{
1352	int last_head = tx->last_head;
1353	int last_tail = tx->last_tail;
1354
1355	if (last_tail >= last_head)
1356		return tx->ring_size - last_tail + last_head - 1;
1357	else
1358		return last_head - last_tail - 1;
1359}
1360
1361void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
1362				      bool enable_timestamping,
1363				      bool enable_onestep_sync)
1364{
1365	if (enable_timestamping)
1366		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
1367	else
1368		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
1369	if (enable_onestep_sync)
1370		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
1371	else
1372		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
1373}
1374
1375static int lan743x_tx_frame_start(struct lan743x_tx *tx,
1376				  unsigned char *first_buffer,
1377				  unsigned int first_buffer_length,
1378				  unsigned int frame_length,
1379				  bool time_stamp,
1380				  bool check_sum)
1381{
1382	/* called only from within lan743x_tx_xmit_frame.
1383	 * assuming tx->ring_lock has already been acquired.
1384	 */
1385	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1386	struct lan743x_tx_buffer_info *buffer_info = NULL;
1387	struct lan743x_adapter *adapter = tx->adapter;
1388	struct device *dev = &adapter->pdev->dev;
1389	dma_addr_t dma_ptr;
1390
1391	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
1392	tx->frame_first = tx->last_tail;
1393	tx->frame_tail = tx->frame_first;
1394
1395	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1396	buffer_info = &tx->buffer_info[tx->frame_tail];
1397	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
1398				 DMA_TO_DEVICE);
1399	if (dma_mapping_error(dev, dma_ptr))
1400		return -ENOMEM;
1401
1402	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1403	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1404	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1405		TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1406
1407	buffer_info->skb = NULL;
1408	buffer_info->dma_ptr = dma_ptr;
1409	buffer_info->buffer_length = first_buffer_length;
1410	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1411
1412	tx->frame_data0 = (first_buffer_length &
1413		TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1414		TX_DESC_DATA0_DTYPE_DATA_ |
1415		TX_DESC_DATA0_FS_ |
1416		TX_DESC_DATA0_FCS_;
1417	if (time_stamp)
1418		tx->frame_data0 |= TX_DESC_DATA0_TSE_;
1419
1420	if (check_sum)
1421		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
1422				   TX_DESC_DATA0_IPE_ |
1423				   TX_DESC_DATA0_TPE_;
1424
 1425	/* data0 will be programmed in one of the other frame assembler functions */
1426	return 0;
1427}
1428
1429static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
1430				     unsigned int frame_length,
1431				     int nr_frags)
1432{
1433	/* called only from within lan743x_tx_xmit_frame.
1434	 * assuming tx->ring_lock has already been acquired.
1435	 */
1436	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1437	struct lan743x_tx_buffer_info *buffer_info = NULL;
1438
1439	/* wrap up previous descriptor */
1440	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
1441	if (nr_frags <= 0) {
1442		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1443		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1444	}
1445	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1446	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1447
1448	/* move to next descriptor */
1449	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1450	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1451	buffer_info = &tx->buffer_info[tx->frame_tail];
1452
1453	/* add extension descriptor */
1454	tx_descriptor->data1 = 0;
1455	tx_descriptor->data2 = 0;
1456	tx_descriptor->data3 = 0;
1457
1458	buffer_info->skb = NULL;
1459	buffer_info->dma_ptr = 0;
1460	buffer_info->buffer_length = 0;
1461	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1462
1463	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
1464			  TX_DESC_DATA0_DTYPE_EXT_ |
1465			  TX_DESC_DATA0_EXT_LSO_;
1466
 1467	/* data0 will be programmed in one of the other frame assembler functions */
1468}
1469
1470static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
1471					 const skb_frag_t *fragment,
1472					 unsigned int frame_length)
1473{
1474	/* called only from within lan743x_tx_xmit_frame
1475	 * assuming tx->ring_lock has already been acquired
1476	 */
1477	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1478	struct lan743x_tx_buffer_info *buffer_info = NULL;
1479	struct lan743x_adapter *adapter = tx->adapter;
1480	struct device *dev = &adapter->pdev->dev;
1481	unsigned int fragment_length = 0;
1482	dma_addr_t dma_ptr;
1483
1484	fragment_length = skb_frag_size(fragment);
1485	if (!fragment_length)
1486		return 0;
1487
1488	/* wrap up previous descriptor */
1489	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1490	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1491
1492	/* move to next descriptor */
1493	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1494	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1495	buffer_info = &tx->buffer_info[tx->frame_tail];
1496	dma_ptr = skb_frag_dma_map(dev, fragment,
1497				   0, fragment_length,
1498				   DMA_TO_DEVICE);
1499	if (dma_mapping_error(dev, dma_ptr)) {
1500		int desc_index;
1501
1502		/* cleanup all previously setup descriptors */
1503		desc_index = tx->frame_first;
1504		while (desc_index != tx->frame_tail) {
1505			lan743x_tx_release_desc(tx, desc_index, true);
1506			desc_index = lan743x_tx_next_index(tx, desc_index);
1507		}
1508		dma_wmb();
1509		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1510		tx->frame_first = 0;
1511		tx->frame_data0 = 0;
1512		tx->frame_tail = 0;
1513		return -ENOMEM;
1514	}
1515
1516	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1517	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1518	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1519			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1520
1521	buffer_info->skb = NULL;
1522	buffer_info->dma_ptr = dma_ptr;
1523	buffer_info->buffer_length = fragment_length;
1524	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1525	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
1526
1527	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1528			  TX_DESC_DATA0_DTYPE_DATA_ |
1529			  TX_DESC_DATA0_FCS_;
1530
 1531	/* data0 will be programmed in one of the other frame assembler functions */
1532	return 0;
1533}
1534
1535static void lan743x_tx_frame_end(struct lan743x_tx *tx,
1536				 struct sk_buff *skb,
1537				 bool time_stamp,
1538				 bool ignore_sync)
1539{
1540	/* called only from within lan743x_tx_xmit_frame
1541	 * assuming tx->ring_lock has already been acquired
1542	 */
1543	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1544	struct lan743x_tx_buffer_info *buffer_info = NULL;
1545	struct lan743x_adapter *adapter = tx->adapter;
1546	u32 tx_tail_flags = 0;
1547
1548	/* wrap up previous descriptor */
1549	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
1550	    TX_DESC_DATA0_DTYPE_DATA_) {
1551		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1552		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1553	}
1554
1555	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1556	buffer_info = &tx->buffer_info[tx->frame_tail];
1557	buffer_info->skb = skb;
1558	if (time_stamp)
1559		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
1560	if (ignore_sync)
1561		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
1562
1563	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
1564	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1565	tx->last_tail = tx->frame_tail;
1566
1567	dma_wmb();
1568
1569	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
1570		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
1571	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
1572		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
1573		TX_TAIL_SET_TOP_INT_EN_;
1574
1575	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1576			  tx_tail_flags | tx->frame_tail);
1577	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1578}
1579
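/* Map the skb head and fragments into TX descriptors (adding an LSO
 * extension descriptor for GSO frames), request a hardware timestamp
 * when enabled, and ring the TX tail.  If the ring is too full the skb
 * is parked in overflow_skb and the queue is stopped (or the skb is
 * dropped if it could never fit).
 */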
1580static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
1581					 struct sk_buff *skb)
1582{
1583	int required_number_of_descriptors = 0;
1584	unsigned int start_frame_length = 0;
1585	unsigned int frame_length = 0;
1586	unsigned int head_length = 0;
1587	unsigned long irq_flags = 0;
1588	bool do_timestamp = false;
1589	bool ignore_sync = false;
1590	int nr_frags = 0;
1591	bool gso = false;
1592	int j;
1593
1594	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
1595
1596	spin_lock_irqsave(&tx->ring_lock, irq_flags);
1597	if (required_number_of_descriptors >
1598		lan743x_tx_get_avail_desc(tx)) {
1599		if (required_number_of_descriptors > (tx->ring_size - 1)) {
1600			dev_kfree_skb_irq(skb);
1601		} else {
1602			/* save to overflow buffer */
1603			tx->overflow_skb = skb;
1604			netif_stop_queue(tx->adapter->netdev);
1605		}
1606		goto unlock;
1607	}
1608
 1609	/* space available, transmit skb */
1610	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1611	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
1612	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
1613		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1614		do_timestamp = true;
1615		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
1616			ignore_sync = true;
1617	}
1618	head_length = skb_headlen(skb);
1619	frame_length = skb_pagelen(skb);
1620	nr_frags = skb_shinfo(skb)->nr_frags;
1621	start_frame_length = frame_length;
1622	gso = skb_is_gso(skb);
1623	if (gso) {
1624		start_frame_length = max(skb_shinfo(skb)->gso_size,
1625					 (unsigned short)8);
1626	}
1627
1628	if (lan743x_tx_frame_start(tx,
1629				   skb->data, head_length,
1630				   start_frame_length,
1631				   do_timestamp,
1632				   skb->ip_summed == CHECKSUM_PARTIAL)) {
1633		dev_kfree_skb_irq(skb);
1634		goto unlock;
1635	}
1636
1637	if (gso)
1638		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
1639
1640	if (nr_frags <= 0)
1641		goto finish;
1642
1643	for (j = 0; j < nr_frags; j++) {
1644		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
1645
1646		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
 1647			/* on error there is no need to call
 1648			 * lan743x_tx_frame_end; frame assembler
 1649			 * cleanup was already performed inside
 1650			 * lan743x_tx_frame_add_fragment
 1651			 */
1652			dev_kfree_skb_irq(skb);
1653			goto unlock;
1654		}
1655	}
1656
1657finish:
1658	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
1659
1660unlock:
1661	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1662	return NETDEV_TX_OK;
1663}
1664
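/* TX NAPI poll: reclaim completed descriptors, restart the queue (and
 * transmit any parked overflow skb) once space is available, then
 * re-enable the TX channel interrupt.
 */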
1665static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1666{
1667	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
1668	struct lan743x_adapter *adapter = tx->adapter;
1669	bool start_transmitter = false;
1670	unsigned long irq_flags = 0;
1671	u32 ioc_bit = 0;
1672
1673	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
1674	lan743x_csr_read(adapter, DMAC_INT_STS);
1675	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
1676		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
1677	spin_lock_irqsave(&tx->ring_lock, irq_flags);
1678
1679	/* clean up tx ring */
1680	lan743x_tx_release_completed_descriptors(tx);
1681	if (netif_queue_stopped(adapter->netdev)) {
1682		if (tx->overflow_skb) {
1683			if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
1684				lan743x_tx_get_avail_desc(tx))
1685				start_transmitter = true;
1686		} else {
1687			netif_wake_queue(adapter->netdev);
1688		}
1689	}
1690	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1691
1692	if (start_transmitter) {
1693		/* space is now available, transmit overflow skb */
1694		lan743x_tx_xmit_frame(tx, tx->overflow_skb);
1695		tx->overflow_skb = NULL;
1696		netif_wake_queue(adapter->netdev);
1697	}
1698
1699	if (!napi_complete(napi))
1700		goto done;
1701
1702	/* enable isr */
1703	lan743x_csr_write(adapter, INT_EN_SET,
1704			  INT_BIT_DMA_TX_(tx->channel_number));
1705	lan743x_csr_read(adapter, INT_STS);
1706
1707done:
1708	return 0;
1709}
1710
1711static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
1712{
1713	if (tx->head_cpu_ptr) {
1714		dma_free_coherent(&tx->adapter->pdev->dev,
1715				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
1716				  tx->head_dma_ptr);
1717		tx->head_cpu_ptr = NULL;
1718		tx->head_dma_ptr = 0;
1719	}
1720	kfree(tx->buffer_info);
1721	tx->buffer_info = NULL;
1722
1723	if (tx->ring_cpu_ptr) {
1724		dma_free_coherent(&tx->adapter->pdev->dev,
1725				  tx->ring_allocation_size, tx->ring_cpu_ptr,
1726				  tx->ring_dma_ptr);
1727		tx->ring_allocation_size = 0;
1728		tx->ring_cpu_ptr = NULL;
1729		tx->ring_dma_ptr = 0;
1730	}
1731	tx->ring_size = 0;
1732}
1733
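/* Allocate the TX descriptor ring, the per-descriptor buffer_info array
 * and the DMA-coherent head write-back word.
 */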
1734static int lan743x_tx_ring_init(struct lan743x_tx *tx)
1735{
1736	size_t ring_allocation_size = 0;
1737	void *cpu_ptr = NULL;
1738	dma_addr_t dma_ptr;
1739	int ret = -ENOMEM;
1740
1741	tx->ring_size = LAN743X_TX_RING_SIZE;
1742	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
1743		ret = -EINVAL;
1744		goto cleanup;
1745	}
1746	ring_allocation_size = ALIGN(tx->ring_size *
1747				     sizeof(struct lan743x_tx_descriptor),
1748				     PAGE_SIZE);
1749	dma_ptr = 0;
1750	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
1751				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
1752	if (!cpu_ptr) {
1753		ret = -ENOMEM;
1754		goto cleanup;
1755	}
1756
1757	tx->ring_allocation_size = ring_allocation_size;
1758	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
1759	tx->ring_dma_ptr = dma_ptr;
1760
1761	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
1762	if (!cpu_ptr) {
1763		ret = -ENOMEM;
1764		goto cleanup;
1765	}
1766	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
1767	dma_ptr = 0;
1768	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
1769				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
1770				     GFP_KERNEL);
1771	if (!cpu_ptr) {
1772		ret = -ENOMEM;
1773		goto cleanup;
1774	}
1775
1776	tx->head_cpu_ptr = cpu_ptr;
1777	tx->head_dma_ptr = dma_ptr;
1778	if (tx->head_dma_ptr & 0x3) {
1779		ret = -ENOMEM;
1780		goto cleanup;
1781	}
1782
1783	return 0;
1784
1785cleanup:
1786	lan743x_tx_ring_cleanup(tx);
1787	return ret;
1788}
1789
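/* Stop the TX DMA channel, mask its interrupts, tear down NAPI, disable
 * the TX FIFO, and release all descriptors, any pending overflow skb,
 * and the ring memory.
 */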
1790static void lan743x_tx_close(struct lan743x_tx *tx)
1791{
1792	struct lan743x_adapter *adapter = tx->adapter;
1793
1794	lan743x_csr_write(adapter,
1795			  DMAC_CMD,
1796			  DMAC_CMD_STOP_T_(tx->channel_number));
1797	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
1798
1799	lan743x_csr_write(adapter,
1800			  DMAC_INT_EN_CLR,
1801			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1802	lan743x_csr_write(adapter, INT_EN_CLR,
1803			  INT_BIT_DMA_TX_(tx->channel_number));
1804	napi_disable(&tx->napi);
1805	netif_napi_del(&tx->napi);
1806
1807	lan743x_csr_write(adapter, FCT_TX_CTL,
1808			  FCT_TX_CTL_DIS_(tx->channel_number));
1809	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1810				 FCT_TX_CTL_EN_(tx->channel_number),
1811				 0, 1000, 20000, 100);
1812
1813	lan743x_tx_release_all_descriptors(tx);
1814
1815	if (tx->overflow_skb) {
1816		dev_kfree_skb(tx->overflow_skb);
1817		tx->overflow_skb = NULL;
1818	}
1819
1820	lan743x_tx_ring_cleanup(tx);
1821}
1822
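/* Bring up one TX channel: allocate the ring, reset and enable the TX
 * FIFO, reset the DMA channel, program the ring base, ring length, and
 * head write-back addresses, register NAPI, configure interrupt
 * behavior from the vector flags, and start the DMA channel.
 */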
1823static int lan743x_tx_open(struct lan743x_tx *tx)
1824{
1825	struct lan743x_adapter *adapter = NULL;
1826	u32 data = 0;
1827	int ret;
1828
1829	adapter = tx->adapter;
1830	ret = lan743x_tx_ring_init(tx);
1831	if (ret)
1832		return ret;
1833
1834	/* initialize fifo */
1835	lan743x_csr_write(adapter, FCT_TX_CTL,
1836			  FCT_TX_CTL_RESET_(tx->channel_number));
1837	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1838				 FCT_TX_CTL_RESET_(tx->channel_number),
1839				 0, 1000, 20000, 100);
1840
1841	/* enable fifo */
1842	lan743x_csr_write(adapter, FCT_TX_CTL,
1843			  FCT_TX_CTL_EN_(tx->channel_number));
1844
1845	/* reset tx channel */
1846	lan743x_csr_write(adapter, DMAC_CMD,
1847			  DMAC_CMD_TX_SWR_(tx->channel_number));
1848	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
1849				 DMAC_CMD_TX_SWR_(tx->channel_number),
1850				 0, 1000, 20000, 100);
1851
1852	/* Write TX_BASE_ADDR */
1853	lan743x_csr_write(adapter,
1854			  TX_BASE_ADDRH(tx->channel_number),
1855			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
1856	lan743x_csr_write(adapter,
1857			  TX_BASE_ADDRL(tx->channel_number),
1858			  DMA_ADDR_LOW32(tx->ring_dma_ptr));
1859
1860	/* Write TX_CFG_B */
1861	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
1862	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
1863	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
1864	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1865		data |= TX_CFG_B_TDMABL_512_;
1866	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
1867
1868	/* Write TX_CFG_A */
1869	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
1870	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
1871		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
1872		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
1873		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
1874		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
1875	}
1876	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
1877
1878	/* Write TX_HEAD_WRITEBACK_ADDR */
1879	lan743x_csr_write(adapter,
1880			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
1881			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
1882	lan743x_csr_write(adapter,
1883			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
1884			  DMA_ADDR_LOW32(tx->head_dma_ptr));
1885
1886	/* set last head */
1887	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
1888
1889	/* write TX_TAIL */
1890	tx->last_tail = 0;
1891	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1892			  (u32)(tx->last_tail));
1893	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
1894							 INT_BIT_DMA_TX_
1895							 (tx->channel_number));
1896	netif_tx_napi_add(adapter->netdev,
1897			  &tx->napi, lan743x_tx_napi_poll,
1898			  tx->ring_size - 1);
1899	napi_enable(&tx->napi);
1900
1901	data = 0;
1902	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
1903		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
1904	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
1905		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
1906	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
1907		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
1908	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
1909		data |= TX_CFG_C_TX_INT_EN_R2C_;
1910	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
1911
1912	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
1913		lan743x_csr_write(adapter, INT_EN_SET,
1914				  INT_BIT_DMA_TX_(tx->channel_number));
1915	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
1916			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1917
1918	/* start dmac channel */
1919	lan743x_csr_write(adapter, DMAC_CMD,
1920			  DMAC_CMD_START_T_(tx->channel_number));
1921	return 0;
1922}
1923
1924static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
1925{
1926	return ((++index) % rx->ring_size);
1927}
1928
1929static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
1930{
1931	/* update the tail once per 8 descriptors */
1932	if ((index & 7) == 7)
1933		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
1934				  index);
1935}
1936
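/* Attach a freshly allocated, DMA-mapped receive skb to a ring slot.
 * Any previously mapped buffer is synced (over its used area only) and
 * unmapped first; the descriptor is then handed back to hardware with
 * the OWN bit set.
 */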
1937static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
1938{
1939	struct net_device *netdev = rx->adapter->netdev;
1940	struct device *dev = &rx->adapter->pdev->dev;
1941	struct lan743x_rx_buffer_info *buffer_info;
1942	unsigned int buffer_length, used_length;
1943	struct lan743x_rx_descriptor *descriptor;
1944	struct sk_buff *skb;
1945	dma_addr_t dma_ptr;
1946
1947	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
1948
1949	descriptor = &rx->ring_cpu_ptr[index];
1950	buffer_info = &rx->buffer_info[index];
1951	skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
1952	if (!skb)
1953		return -ENOMEM;
1954	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
1955	if (dma_mapping_error(dev, dma_ptr)) {
1956		dev_kfree_skb_any(skb);
1957		return -ENOMEM;
1958	}
1959	if (buffer_info->dma_ptr) {
1960		/* sync used area of buffer only */
1961		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
1962			/* The frame length is valid only if the LS bit is
1963			 * set. It is a safe upper bound for the used area
1964			 * in this buffer.
1965			 */
1966			used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
1967					  (le32_to_cpu(descriptor->data0)),
1968					  buffer_info->buffer_length);
1969		else
1970			used_length = buffer_info->buffer_length;
1971		dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
1972					used_length,
1973					DMA_FROM_DEVICE);
1974		dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
1975				       buffer_info->buffer_length,
1976				       DMA_FROM_DEVICE,
1977				       DMA_ATTR_SKIP_CPU_SYNC);
1978	}
1979
1980	buffer_info->skb = skb;
1981	buffer_info->dma_ptr = dma_ptr;
1982	buffer_info->buffer_length = buffer_length;
1983	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
1984	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
1985	descriptor->data3 = 0;
1986	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
1987			    (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
1988	lan743x_rx_update_tail(rx, index);
1989
1990	return 0;
1991}
1992
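/* Hand a ring slot back to hardware with its existing buffer by
 * rewriting the descriptor and setting the OWN bit.
 */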
1993static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
1994{
1995	struct lan743x_rx_buffer_info *buffer_info;
1996	struct lan743x_rx_descriptor *descriptor;
1997
1998	descriptor = &rx->ring_cpu_ptr[index];
1999	buffer_info = &rx->buffer_info[index];
2000
2001	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2002	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2003	descriptor->data3 = 0;
2004	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2005			    ((buffer_info->buffer_length) &
2006			    RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2007	lan743x_rx_update_tail(rx, index);
2008}
2009
2010static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
2011{
2012	struct lan743x_rx_buffer_info *buffer_info;
2013	struct lan743x_rx_descriptor *descriptor;
2014
2015	descriptor = &rx->ring_cpu_ptr[index];
2016	buffer_info = &rx->buffer_info[index];
2017
2018	memset(descriptor, 0, sizeof(*descriptor));
2019
2020	if (buffer_info->dma_ptr) {
2021		dma_unmap_single(&rx->adapter->pdev->dev,
2022				 buffer_info->dma_ptr,
2023				 buffer_info->buffer_length,
2024				 DMA_FROM_DEVICE);
2025		buffer_info->dma_ptr = 0;
2026	}
2027
2028	if (buffer_info->skb) {
2029		dev_kfree_skb(buffer_info->skb);
2030		buffer_info->skb = NULL;
2031	}
2032
2033	memset(buffer_info, 0, sizeof(*buffer_info));
2034}
2035
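/* Linearize the assembled skb and trim it to the reported frame length
 * minus the FCS. Returns NULL (and frees the skb) if linearization
 * fails.
 */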
2036static struct sk_buff *
2037lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
2038{
2039	if (skb_linearize(skb)) {
2040		dev_kfree_skb_irq(skb);
2041		return NULL;
2042	}
2043	frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
2044	if (skb->len > frame_length) {
2045		skb->tail -= skb->len - frame_length;
2046		skb->len = frame_length;
2047	}
2048	return skb;
2049}
2050
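/* Consume one completed RX descriptor. Buffers of a multi-buffer frame
 * are chained onto rx->skb_head via frag_list; an optional extension
 * descriptor supplies the hardware timestamp. When the last buffer is
 * seen, the frame is trimmed and handed to napi_gro_receive().
 */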
2051static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
2052{
2053	int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
2054	struct lan743x_rx_descriptor *descriptor, *desc_ext;
2055	struct net_device *netdev = rx->adapter->netdev;
2056	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2057	struct lan743x_rx_buffer_info *buffer_info;
2058	int frame_length, buffer_length;
2059	int extension_index = -1;
2060	bool is_last, is_first;
2061	struct sk_buff *skb;
2062
2063	if (current_head_index < 0 || current_head_index >= rx->ring_size)
2064		goto done;
2065
2066	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
2067		goto done;
2068
2069	if (rx->last_head == current_head_index)
2070		goto done;
2071
2072	descriptor = &rx->ring_cpu_ptr[rx->last_head];
2073	if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
2074		goto done;
2075	buffer_info = &rx->buffer_info[rx->last_head];
2076
2077	is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
2078	is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;
2079
2080	if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
2081		/* extension is expected to follow */
2082		int index = lan743x_rx_next_index(rx, rx->last_head);
2083
2084		if (index == current_head_index)
2085			/* extension not yet available */
2086			goto done;
2087		desc_ext = &rx->ring_cpu_ptr[index];
2088		if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
2089			/* extension not yet available */
2090			goto done;
2091		if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
2092			goto move_forward;
2093		extension_index = index;
2094	}
2095
2096	/* Only the last buffer in a multi-buffer frame contains the total frame
2097	 * length. The chip occasionally sends more buffers than strictly
2098	 * required to reach the total frame length.
2099	 * Handle this by adding all buffers to the skb in their entirety.
2100	 * Once the real frame length is known, trim the skb.
2101	 */
2102	frame_length =
2103		RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
2104	buffer_length = buffer_info->buffer_length;
2105
2106	netdev_dbg(netdev, "%s%schunk: %d/%d",
2107		   is_first ? "first " : "      ",
2108		   is_last  ? "last  " : "      ",
2109		   frame_length, buffer_length);
2110
2111	/* save existing skb, allocate new skb and map to dma */
2112	skb = buffer_info->skb;
2113	if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
2114		/* Failed to allocate the next skb because memory
2115		 * is very low. Drop the packet being assembled and
2116		 * reuse this buffer.
2117		 */
2118		lan743x_rx_reuse_ring_element(rx, rx->last_head);
2119		/* drop packet that was being assembled */
2120		dev_kfree_skb_irq(rx->skb_head);
2121		rx->skb_head = NULL;
2122		goto process_extension;
2123	}
2124
2125	/* add buffers to skb via skb->frag_list */
2126	if (is_first) {
2127		skb_reserve(skb, RX_HEAD_PADDING);
2128		skb_put(skb, buffer_length - RX_HEAD_PADDING);
2129		if (rx->skb_head)
2130			dev_kfree_skb_irq(rx->skb_head);
2131		rx->skb_head = skb;
2132	} else if (rx->skb_head) {
2133		skb_put(skb, buffer_length);
2134		if (skb_shinfo(rx->skb_head)->frag_list)
2135			rx->skb_tail->next = skb;
2136		else
2137			skb_shinfo(rx->skb_head)->frag_list = skb;
2138		rx->skb_tail = skb;
2139		rx->skb_head->len += skb->len;
2140		rx->skb_head->data_len += skb->len;
2141		rx->skb_head->truesize += skb->truesize;
2142	} else {
2143		/* packet to assemble has already been dropped because one or
2144		 * more of its buffers could not be allocated
2145		 */
2146		netdev_dbg(netdev, "drop buffer intended for dropped packet");
2147		dev_kfree_skb_irq(skb);
2148	}
2149
2150process_extension:
2151	if (extension_index >= 0) {
2152		u32 ts_sec;
2153		u32 ts_nsec;
2154
2155		ts_sec = le32_to_cpu(desc_ext->data1);
2156		ts_nsec = (le32_to_cpu(desc_ext->data2) &
2157			  RX_DESC_DATA2_TS_NS_MASK_);
2158		if (rx->skb_head)
2159			skb_hwtstamps(rx->skb_head)->hwtstamp =
2160				ktime_set(ts_sec, ts_nsec);
2161		lan743x_rx_reuse_ring_element(rx, extension_index);
2162		rx->last_head = extension_index;
2163		netdev_dbg(netdev, "process extension");
2164	}
2165
2166	if (is_last && rx->skb_head)
2167		rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);
2168
2169	if (is_last && rx->skb_head) {
2170		rx->skb_head->protocol = eth_type_trans(rx->skb_head,
2171							rx->adapter->netdev);
2172		netdev_dbg(netdev, "sending %d byte frame to OS",
2173			   rx->skb_head->len);
2174		napi_gro_receive(&rx->napi, rx->skb_head);
2175		rx->skb_head = NULL;
2176	}
2177
2178move_forward:
2179	/* push tail and head forward */
2180	rx->last_tail = rx->last_head;
2181	rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
2182	result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
2183done:
2184	return result;
2185}
2186
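/* RX NAPI poll: clear the frame-received status if required, process up
 * to 'weight' buffers, and re-arm the interrupt (via INT_EN_SET or the
 * RX tail register, depending on the vector flags) once the ring is
 * idle.
 */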
2187static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
2188{
2189	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
2190	struct lan743x_adapter *adapter = rx->adapter;
2191	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2192	u32 rx_tail_flags = 0;
2193	int count;
2194
2195	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
2196		/* clear int status bit before reading packet */
2197		lan743x_csr_write(adapter, DMAC_INT_STS,
2198				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2199	}
2200	for (count = 0; count < weight; count++) {
2201		result = lan743x_rx_process_buffer(rx);
2202		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
2203			break;
2204	}
2205	rx->frame_count += count;
2206	if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
2207		return weight;
2208
2209	if (!napi_complete_done(napi, count))
2210		return count;
2211
2212	/* re-arm interrupts, must write to rx tail on some chip variants */
2213	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2214		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
2215	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
2216		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
2217	} else {
2218		lan743x_csr_write(adapter, INT_EN_SET,
2219				  INT_BIT_DMA_RX_(rx->channel_number));
2220	}
2221
2222	if (rx_tail_flags)
2223		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2224				  rx_tail_flags | rx->last_tail);
2225
2226	return count;
2227}
2228
2229static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
2230{
2231	if (rx->buffer_info && rx->ring_cpu_ptr) {
2232		int index;
2233
2234		for (index = 0; index < rx->ring_size; index++)
2235			lan743x_rx_release_ring_element(rx, index);
2236	}
2237
2238	if (rx->head_cpu_ptr) {
2239		dma_free_coherent(&rx->adapter->pdev->dev,
2240				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
2241				  rx->head_dma_ptr);
2242		rx->head_cpu_ptr = NULL;
2243		rx->head_dma_ptr = 0;
2244	}
2245
2246	kfree(rx->buffer_info);
2247	rx->buffer_info = NULL;
2248
2249	if (rx->ring_cpu_ptr) {
2250		dma_free_coherent(&rx->adapter->pdev->dev,
2251				  rx->ring_allocation_size, rx->ring_cpu_ptr,
2252				  rx->ring_dma_ptr);
2253		rx->ring_allocation_size = 0;
2254		rx->ring_cpu_ptr = NULL;
2255		rx->ring_dma_ptr = 0;
2256	}
2257
2258	rx->ring_size = 0;
2259	rx->last_head = 0;
2260}
2261
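/* Allocate the RX descriptor ring, buffer_info array, and head
 * write-back word (as for TX), then pre-fill every ring slot with a
 * mapped receive skb.
 */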
2262static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2263{
2264	size_t ring_allocation_size = 0;
2265	dma_addr_t dma_ptr = 0;
2266	void *cpu_ptr = NULL;
2267	int ret = -ENOMEM;
2268	int index = 0;
2269
2270	rx->ring_size = LAN743X_RX_RING_SIZE;
2271	if (rx->ring_size <= 1) {
2272		ret = -EINVAL;
2273		goto cleanup;
2274	}
2275	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2276		ret = -EINVAL;
2277		goto cleanup;
2278	}
2279	ring_allocation_size = ALIGN(rx->ring_size *
2280				     sizeof(struct lan743x_rx_descriptor),
2281				     PAGE_SIZE);
2282	dma_ptr = 0;
2283	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2284				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2285	if (!cpu_ptr) {
2286		ret = -ENOMEM;
2287		goto cleanup;
2288	}
2289	rx->ring_allocation_size = ring_allocation_size;
2290	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2291	rx->ring_dma_ptr = dma_ptr;
2292
2293	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
2294			  GFP_KERNEL);
2295	if (!cpu_ptr) {
2296		ret = -ENOMEM;
2297		goto cleanup;
2298	}
2299	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2300	dma_ptr = 0;
2301	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2302				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
2303				     GFP_KERNEL);
2304	if (!cpu_ptr) {
2305		ret = -ENOMEM;
2306		goto cleanup;
2307	}
2308
2309	rx->head_cpu_ptr = cpu_ptr;
2310	rx->head_dma_ptr = dma_ptr;
2311	if (rx->head_dma_ptr & 0x3) {
2312		ret = -ENOMEM;
2313		goto cleanup;
2314	}
2315
2316	rx->last_head = 0;
2317	for (index = 0; index < rx->ring_size; index++) {
2318		ret = lan743x_rx_init_ring_element(rx, index);
2319		if (ret)
2320			goto cleanup;
2321	}
2322	return 0;
2323
2324cleanup:
2325	lan743x_rx_ring_cleanup(rx);
2326	return ret;
2327}
2328
2329static void lan743x_rx_close(struct lan743x_rx *rx)
2330{
2331	struct lan743x_adapter *adapter = rx->adapter;
2332
2333	lan743x_csr_write(adapter, FCT_RX_CTL,
2334			  FCT_RX_CTL_DIS_(rx->channel_number));
2335	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2336				 FCT_RX_CTL_EN_(rx->channel_number),
2337				 0, 1000, 20000, 100);
2338
2339	lan743x_csr_write(adapter, DMAC_CMD,
2340			  DMAC_CMD_STOP_R_(rx->channel_number));
2341	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
2342
2343	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
2344			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2345	lan743x_csr_write(adapter, INT_EN_CLR,
2346			  INT_BIT_DMA_RX_(rx->channel_number));
2347	napi_disable(&rx->napi);
2348
2349	netif_napi_del(&rx->napi);
2350
2351	lan743x_rx_ring_cleanup(rx);
2352}
2353
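/* Bring up one RX channel: initialize the ring, register NAPI, reset
 * the DMA channel, program the ring and head write-back addresses and
 * RX_CFG_A/B/C, write the initial tail, enable interrupts, start the
 * DMA channel, and finally reset, configure flow control for, and
 * enable the RX FIFO.
 */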
2354static int lan743x_rx_open(struct lan743x_rx *rx)
2355{
2356	struct lan743x_adapter *adapter = rx->adapter;
2357	u32 data = 0;
2358	int ret;
2359
2360	rx->frame_count = 0;
2361	ret = lan743x_rx_ring_init(rx);
2362	if (ret)
2363		goto return_error;
2364
2365	netif_napi_add(adapter->netdev,
2366		       &rx->napi, lan743x_rx_napi_poll,
2367		       NAPI_POLL_WEIGHT);
2368
2369	lan743x_csr_write(adapter, DMAC_CMD,
2370			  DMAC_CMD_RX_SWR_(rx->channel_number));
2371	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2372				 DMAC_CMD_RX_SWR_(rx->channel_number),
2373				 0, 1000, 20000, 100);
2374
2375	/* set ring base address */
2376	lan743x_csr_write(adapter,
2377			  RX_BASE_ADDRH(rx->channel_number),
2378			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
2379	lan743x_csr_write(adapter,
2380			  RX_BASE_ADDRL(rx->channel_number),
2381			  DMA_ADDR_LOW32(rx->ring_dma_ptr));
2382
2383	/* set rx write back address */
2384	lan743x_csr_write(adapter,
2385			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
2386			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
2387	lan743x_csr_write(adapter,
2388			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
2389			  DMA_ADDR_LOW32(rx->head_dma_ptr));
2390	data = RX_CFG_A_RX_HP_WB_EN_;
2391	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2392		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
2393			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
2394			RX_CFG_A_RX_PF_THRES_SET_(16) |
2395			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
2396	}
2397
2398	/* set RX_CFG_A */
2399	lan743x_csr_write(adapter,
2400			  RX_CFG_A(rx->channel_number), data);
2401
2402	/* set RX_CFG_B */
2403	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
2404	data &= ~RX_CFG_B_RX_PAD_MASK_;
2405	if (!RX_HEAD_PADDING)
2406		data |= RX_CFG_B_RX_PAD_0_;
2407	else
2408		data |= RX_CFG_B_RX_PAD_2_;
2409	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
2410	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
2411	data |= RX_CFG_B_TS_ALL_RX_;
2412	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2413		data |= RX_CFG_B_RDMABL_512_;
2414
2415	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
2416	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2417							 INT_BIT_DMA_RX_
2418							 (rx->channel_number));
2419
2420	/* set RX_CFG_C */
2421	data = 0;
2422	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2423		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
2424	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2425		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
2426	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2427		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
2428	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2429		data |= RX_CFG_C_RX_INT_EN_R2C_;
2430	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
2431
2432	rx->last_tail = ((u32)(rx->ring_size - 1));
2433	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2434			  rx->last_tail);
2435	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
2436	if (rx->last_head) {
2437		ret = -EIO;
2438		goto napi_delete;
2439	}
2440
2441	napi_enable(&rx->napi);
2442
2443	lan743x_csr_write(adapter, INT_EN_SET,
2444			  INT_BIT_DMA_RX_(rx->channel_number));
2445	lan743x_csr_write(adapter, DMAC_INT_STS,
2446			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2447	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2448			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2449	lan743x_csr_write(adapter, DMAC_CMD,
2450			  DMAC_CMD_START_R_(rx->channel_number));
2451
2452	/* initialize fifo */
2453	lan743x_csr_write(adapter, FCT_RX_CTL,
2454			  FCT_RX_CTL_RESET_(rx->channel_number));
2455	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2456				 FCT_RX_CTL_RESET_(rx->channel_number),
2457				 0, 1000, 20000, 100);
2458	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
2459			  FCT_FLOW_CTL_REQ_EN_ |
2460			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
2461			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
2462
2463	/* enable fifo */
2464	lan743x_csr_write(adapter, FCT_RX_CTL,
2465			  FCT_RX_CTL_EN_(rx->channel_number));
2466	return 0;
2467
2468napi_delete:
2469	netif_napi_del(&rx->napi);
2470	lan743x_rx_ring_cleanup(rx);
2471
2472return_error:
2473	return ret;
2474}
2475
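/* ndo_stop: shut down the TX channel, all RX channels, PTP, PHY, MAC,
 * and interrupts.
 */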
2476static int lan743x_netdev_close(struct net_device *netdev)
2477{
2478	struct lan743x_adapter *adapter = netdev_priv(netdev);
2479	int index;
2480
2481	lan743x_tx_close(&adapter->tx[0]);
2482
2483	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
2484		lan743x_rx_close(&adapter->rx[index]);
2485
2486	lan743x_ptp_close(adapter);
2487
2488	lan743x_phy_close(adapter);
2489
2490	lan743x_mac_close(adapter);
2491
2492	lan743x_intr_close(adapter);
2493
2494	return 0;
2495}
2496
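/* ndo_open: bring up interrupts, MAC, PHY, PTP, the RFE, every RX
 * channel, and finally the TX channel, unwinding in reverse order on
 * failure.
 */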
2497static int lan743x_netdev_open(struct net_device *netdev)
2498{
2499	struct lan743x_adapter *adapter = netdev_priv(netdev);
2500	int index;
2501	int ret;
2502
2503	ret = lan743x_intr_open(adapter);
2504	if (ret)
2505		goto return_error;
2506
2507	ret = lan743x_mac_open(adapter);
2508	if (ret)
2509		goto close_intr;
2510
2511	ret = lan743x_phy_open(adapter);
2512	if (ret)
2513		goto close_mac;
2514
2515	ret = lan743x_ptp_open(adapter);
2516	if (ret)
2517		goto close_phy;
2518
2519	lan743x_rfe_open(adapter);
2520
2521	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2522		ret = lan743x_rx_open(&adapter->rx[index]);
2523		if (ret)
2524			goto close_rx;
2525	}
2526
2527	ret = lan743x_tx_open(&adapter->tx[0]);
2528	if (ret)
2529		goto close_rx;
2530
2531	return 0;
2532
2533close_rx:
2534	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2535		if (adapter->rx[index].ring_cpu_ptr)
2536			lan743x_rx_close(&adapter->rx[index]);
2537	}
2538	lan743x_ptp_close(adapter);
2539
2540close_phy:
2541	lan743x_phy_close(adapter);
2542
2543close_mac:
2544	lan743x_mac_close(adapter);
2545
2546close_intr:
2547	lan743x_intr_close(adapter);
2548
2549return_error:
2550	netif_warn(adapter, ifup, adapter->netdev,
2551		   "Error opening LAN743x\n");
2552	return ret;
2553}
2554
2555static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
2556					     struct net_device *netdev)
2557{
2558	struct lan743x_adapter *adapter = netdev_priv(netdev);
2559
2560	return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
2561}
2562
2563static int lan743x_netdev_ioctl(struct net_device *netdev,
2564				struct ifreq *ifr, int cmd)
2565{
2566	if (!netif_running(netdev))
2567		return -EINVAL;
2568	if (cmd == SIOCSHWTSTAMP)
2569		return lan743x_ptp_ioctl(netdev, ifr, cmd);
2570	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
2571}
2572
2573static void lan743x_netdev_set_multicast(struct net_device *netdev)
2574{
2575	struct lan743x_adapter *adapter = netdev_priv(netdev);
2576
2577	lan743x_rfe_set_multicast(adapter);
2578}
2579
2580static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
2581{
2582	struct lan743x_adapter *adapter = netdev_priv(netdev);
2583	int ret = 0;
2584
2585	ret = lan743x_mac_set_mtu(adapter, new_mtu);
2586	if (!ret)
2587		netdev->mtu = new_mtu;
2588	return ret;
2589}
2590
2591static void lan743x_netdev_get_stats64(struct net_device *netdev,
2592				       struct rtnl_link_stats64 *stats)
2593{
2594	struct lan743x_adapter *adapter = netdev_priv(netdev);
2595
2596	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
2597	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
2598	stats->rx_bytes = lan743x_csr_read(adapter,
2599					   STAT_RX_UNICAST_BYTE_COUNT) +
2600			  lan743x_csr_read(adapter,
2601					   STAT_RX_BROADCAST_BYTE_COUNT) +
2602			  lan743x_csr_read(adapter,
2603					   STAT_RX_MULTICAST_BYTE_COUNT);
2604	stats->tx_bytes = lan743x_csr_read(adapter,
2605					   STAT_TX_UNICAST_BYTE_COUNT) +
2606			  lan743x_csr_read(adapter,
2607					   STAT_TX_BROADCAST_BYTE_COUNT) +
2608			  lan743x_csr_read(adapter,
2609					   STAT_TX_MULTICAST_BYTE_COUNT);
2610	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
2611			   lan743x_csr_read(adapter,
2612					    STAT_RX_ALIGNMENT_ERRORS) +
2613			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
2614			   lan743x_csr_read(adapter,
2615					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
2616			   lan743x_csr_read(adapter,
2617					    STAT_RX_OVERSIZE_FRAME_ERRORS);
2618	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
2619			   lan743x_csr_read(adapter,
2620					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
2621			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
2622	stats->rx_dropped = lan743x_csr_read(adapter,
2623					     STAT_RX_DROPPED_FRAMES);
2624	stats->tx_dropped = lan743x_csr_read(adapter,
2625					     STAT_TX_EXCESSIVE_COLLISION);
2626	stats->multicast = lan743x_csr_read(adapter,
2627					    STAT_RX_MULTICAST_FRAMES) +
2628			   lan743x_csr_read(adapter,
2629					    STAT_TX_MULTICAST_FRAMES);
2630	stats->collisions = lan743x_csr_read(adapter,
2631					     STAT_TX_SINGLE_COLLISIONS) +
2632			    lan743x_csr_read(adapter,
2633					     STAT_TX_MULTIPLE_COLLISIONS) +
2634			    lan743x_csr_read(adapter,
2635					     STAT_TX_LATE_COLLISIONS);
2636}
2637
2638static int lan743x_netdev_set_mac_address(struct net_device *netdev,
2639					  void *addr)
2640{
2641	struct lan743x_adapter *adapter = netdev_priv(netdev);
2642	struct sockaddr *sock_addr = addr;
2643	int ret;
2644
2645	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
2646	if (ret)
2647		return ret;
2648	ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
2649	lan743x_mac_set_address(adapter, sock_addr->sa_data);
2650	lan743x_rfe_update_mac_address(adapter);
2651	return 0;
2652}
2653
2654static const struct net_device_ops lan743x_netdev_ops = {
2655	.ndo_open		= lan743x_netdev_open,
2656	.ndo_stop		= lan743x_netdev_close,
2657	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
2658	.ndo_do_ioctl		= lan743x_netdev_ioctl,
2659	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
2660	.ndo_change_mtu		= lan743x_netdev_change_mtu,
2661	.ndo_get_stats64	= lan743x_netdev_get_stats64,
2662	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
2663};
2664
2665static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
2666{
2667	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2668}
2669
2670static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
2671{
2672	mdiobus_unregister(adapter->mdiobus);
2673}
2674
2675static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
2676{
2677	unregister_netdev(adapter->netdev);
2678
2679	lan743x_mdiobus_cleanup(adapter);
2680	lan743x_hardware_cleanup(adapter);
2681	lan743x_pci_cleanup(adapter);
2682}
2683
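/* One-time hardware bring-up shared by probe and resume: mask all
 * interrupts, initialize the GPIO, MAC, PHY, PTP, and DMAC blocks, and
 * set up the per-channel RX/TX bookkeeping.
 */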
2684static int lan743x_hardware_init(struct lan743x_adapter *adapter,
2685				 struct pci_dev *pdev)
2686{
2687	struct lan743x_tx *tx;
2688	int index;
2689	int ret;
2690
2691	adapter->intr.irq = adapter->pdev->irq;
2692	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2693
2694	ret = lan743x_gpio_init(adapter);
2695	if (ret)
2696		return ret;
2697
2698	ret = lan743x_mac_init(adapter);
2699	if (ret)
2700		return ret;
2701
2702	ret = lan743x_phy_init(adapter);
2703	if (ret)
2704		return ret;
2705
2706	ret = lan743x_ptp_init(adapter);
2707	if (ret)
2708		return ret;
2709
2710	lan743x_rfe_update_mac_address(adapter);
2711
2712	ret = lan743x_dmac_init(adapter);
2713	if (ret)
2714		return ret;
2715
2716	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2717		adapter->rx[index].adapter = adapter;
2718		adapter->rx[index].channel_number = index;
2719	}
2720
2721	tx = &adapter->tx[0];
2722	tx->adapter = adapter;
2723	tx->channel_number = 0;
2724	spin_lock_init(&tx->ring_lock);
2725	return 0;
2726}
2727
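/* Allocate and register the MDIO bus. On LAN7430 the internal PHY sits
 * at address 1, so every other address is masked out of the scan.
 */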
2728static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
2729{
2730	int ret;
2731
2732	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
2733	if (!(adapter->mdiobus)) {
2734		ret = -ENOMEM;
2735		goto return_error;
2736	}
2737
2738	adapter->mdiobus->priv = (void *)adapter;
2739	adapter->mdiobus->read = lan743x_mdiobus_read;
2740	adapter->mdiobus->write = lan743x_mdiobus_write;
2741	adapter->mdiobus->name = "lan743x-mdiobus";
2742	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
2743		 "pci-%s", pci_name(adapter->pdev));
2744
2745	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
2746		/* LAN7430 uses internal phy at address 1 */
2747		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
2748
2749	/* register mdiobus */
2750	ret = mdiobus_register(adapter->mdiobus);
2751	if (ret < 0)
2752		goto return_error;
2753	return 0;
2754
2755return_error:
2756	return ret;
2757}
2758
2759/* lan743x_pcidev_probe - Device Initialization Routine
2760 * @pdev: PCI device information struct
2761 * @id: entry in lan743x_pcidev_tbl
2762 *
2763 * Returns 0 on success, negative on failure
2764 *
2765 * Initializes an adapter identified by a pci_dev structure.
2766 * OS initialization, configuration of the adapter private structure,
2767 * and a hardware reset are performed.
2768 **/
2769static int lan743x_pcidev_probe(struct pci_dev *pdev,
2770				const struct pci_device_id *id)
2771{
2772	struct lan743x_adapter *adapter = NULL;
2773	struct net_device *netdev = NULL;
2774	int ret = -ENODEV;
2775
2776	netdev = devm_alloc_etherdev(&pdev->dev,
2777				     sizeof(struct lan743x_adapter));
2778	if (!netdev)
2779		goto return_error;
2780
2781	SET_NETDEV_DEV(netdev, &pdev->dev);
2782	pci_set_drvdata(pdev, netdev);
2783	adapter = netdev_priv(netdev);
2784	adapter->netdev = netdev;
2785	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2786			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
2787			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
2788	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
2789
2790	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);
2791
2792	ret = lan743x_pci_init(adapter, pdev);
2793	if (ret)
2794		goto return_error;
2795
2796	ret = lan743x_csr_init(adapter);
2797	if (ret)
2798		goto cleanup_pci;
2799
2800	ret = lan743x_hardware_init(adapter, pdev);
2801	if (ret)
2802		goto cleanup_pci;
2803
2804	ret = lan743x_mdiobus_init(adapter);
2805	if (ret)
2806		goto cleanup_hardware;
2807
2808	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
2809	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
2810	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2811	adapter->netdev->hw_features = adapter->netdev->features;
2812
2813	/* carrier off reporting is important to ethtool even BEFORE open */
2814	netif_carrier_off(netdev);
2815
2816	ret = register_netdev(adapter->netdev);
2817	if (ret < 0)
2818		goto cleanup_mdiobus;
2819	return 0;
2820
2821cleanup_mdiobus:
2822	lan743x_mdiobus_cleanup(adapter);
2823
2824cleanup_hardware:
2825	lan743x_hardware_cleanup(adapter);
2826
2827cleanup_pci:
2828	lan743x_pci_cleanup(adapter);
2829
2830return_error:
2831	pr_warn("Initialization failed\n");
2832	return ret;
2833}
2834
2835/**
2836 * lan743x_pcidev_remove - Device Removal Routine
2837 * @pdev: PCI device information struct
2838 *
2839 * This is called by the PCI subsystem to alert the driver
2840 * that it should release a PCI device. This could be caused by a
2841 * hot-plug event, or because the driver is going to be removed from
2842 * memory.
2843 **/
2844static void lan743x_pcidev_remove(struct pci_dev *pdev)
2845{
2846	struct net_device *netdev = pci_get_drvdata(pdev);
2847	struct lan743x_adapter *adapter = netdev_priv(netdev);
2848
2849	lan743x_full_cleanup(adapter);
2850}
2851
2852static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
2853{
2854	struct net_device *netdev = pci_get_drvdata(pdev);
2855	struct lan743x_adapter *adapter = netdev_priv(netdev);
2856
2857	rtnl_lock();
2858	netif_device_detach(netdev);
2859
2860	/* Close the netdev only if it is in the running state.
2861	 * For instance, it is running when the system sleeps via pm-suspend,
2862	 * but not when it sleeps via the GUI suspend menu.
2863	 */
2864	if (netif_running(netdev))
2865		lan743x_netdev_close(netdev);
2866	rtnl_unlock();
2867
2868#ifdef CONFIG_PM
2869	pci_save_state(pdev);
2870#endif
2871
2872	/* clean up lan743x portion */
2873	lan743x_hardware_cleanup(adapter);
2874}
2875
2876#ifdef CONFIG_PM_SLEEP
2877static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
2878{
2879	return bitrev16(crc16(0xFFFF, buf, len));
2880}
2881
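/* Program the wake configuration selected in adapter->wolopts: wake-up
 * frame filters for multicast and ARP, magic packet, unicast and
 * broadcast wake enables, PHY wake, and the matching PMT_CTL and MAC_RX
 * bits.
 */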
2882static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
2883{
2884	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
2885	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
2886	const u8 arp_type[2] = { 0x08, 0x06 };
2887	int mask_index;
2888	u32 pmtctl;
2889	u32 wucsr;
2890	u32 macrx;
2891	u16 crc;
2892
2893	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
2894		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
2895
2896	/* clear wake settings */
2897	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
2898	pmtctl |= PMT_CTL_WUPS_MASK_;
2899	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
2900		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
2901		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
2902
2903	macrx = lan743x_csr_read(adapter, MAC_RX);
2904
2905	wucsr = 0;
2906	mask_index = 0;
2907
2908	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
2909
2910	if (adapter->wolopts & WAKE_PHY) {
2911		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
2912		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
2913	}
2914	if (adapter->wolopts & WAKE_MAGIC) {
2915		wucsr |= MAC_WUCSR_MPEN_;
2916		macrx |= MAC_RX_RXEN_;
2917		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2918	}
2919	if (adapter->wolopts & WAKE_UCAST) {
2920		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
2921		macrx |= MAC_RX_RXEN_;
2922		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2923		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
2924	}
2925	if (adapter->wolopts & WAKE_BCAST) {
2926		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
2927		macrx |= MAC_RX_RXEN_;
2928		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2929		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
2930	}
2931	if (adapter->wolopts & WAKE_MCAST) {
2932		/* IPv4 multicast */
2933		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
2934		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
2935				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
2936				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
2937				  (crc & MAC_WUF_CFG_CRC16_MASK_));
2938		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
2939		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
2940		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
2941		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
2942		mask_index++;
2943
2944		/* IPv6 multicast */
2945		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
2946		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
2947				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
2948				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
2949				  (crc & MAC_WUF_CFG_CRC16_MASK_));
2950		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
2951		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
2952		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
2953		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
2954		mask_index++;
2955
2956		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
2957		macrx |= MAC_RX_RXEN_;
2958		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2959		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
2960	}
2961	if (adapter->wolopts & WAKE_ARP) {
2962		/* set MAC_WUF_CFG & WUF_MASK to match the EtherType
2963		 * field (offset 12,13) = ARP (0x0806)
2964		 */
2965		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
2966		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
2967				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
2968				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
2969				  (crc & MAC_WUF_CFG_CRC16_MASK_));
2970		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
2971		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
2972		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
2973		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
2974		mask_index++;
2975
2976		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
2977		macrx |= MAC_RX_RXEN_;
2978		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2979		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
2980	}
2981
2982	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
2983	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
2984	lan743x_csr_write(adapter, MAC_RX, macrx);
2985}
2986
2987static int lan743x_pm_suspend(struct device *dev)
2988{
2989	struct pci_dev *pdev = to_pci_dev(dev);
2990	struct net_device *netdev = pci_get_drvdata(pdev);
2991	struct lan743x_adapter *adapter = netdev_priv(netdev);
2992
2993	lan743x_pcidev_shutdown(pdev);
2994
2995	/* clear all wakes */
2996	lan743x_csr_write(adapter, MAC_WUCSR, 0);
2997	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
2998	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
2999
3000	if (adapter->wolopts)
3001		lan743x_pm_set_wol(adapter);
3002
3003	/* Host sets PME_En and puts the device into D3hot */
3004	return pci_prepare_to_sleep(pdev);
3005}
3006
3007static int lan743x_pm_resume(struct device *dev)
3008{
3009	struct pci_dev *pdev = to_pci_dev(dev);
3010	struct net_device *netdev = pci_get_drvdata(pdev);
3011	struct lan743x_adapter *adapter = netdev_priv(netdev);
3012	int ret;
3013
3014	pci_set_power_state(pdev, PCI_D0);
3015	pci_restore_state(pdev);
3016	pci_save_state(pdev);
3017
3018	ret = lan743x_hardware_init(adapter, pdev);
3019	if (ret) {
3020		netif_err(adapter, probe, adapter->netdev,
3021			  "lan743x_hardware_init returned %d\n", ret);
3022	}
3023
3024	/* Reopen the netdev on resume only if it is in the running state.
3025	 * For instance, it is running when the system wakes up after pm-suspend,
3026	 * but not when it wakes up after the GUI suspend menu.
3027	 */
3028	if (netif_running(netdev))
3029		lan743x_netdev_open(netdev);
3030
3031	netif_device_attach(netdev);
3032
3033	return 0;
3034}
3035
3036static const struct dev_pm_ops lan743x_pm_ops = {
3037	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3038};
3039#endif /* CONFIG_PM_SLEEP */
3040
3041static const struct pci_device_id lan743x_pcidev_tbl[] = {
3042	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3043	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3044	{ 0, }
3045};
3046
3047MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
3048
3049static struct pci_driver lan743x_pcidev_driver = {
3050	.name     = DRIVER_NAME,
3051	.id_table = lan743x_pcidev_tbl,
3052	.probe    = lan743x_pcidev_probe,
3053	.remove   = lan743x_pcidev_remove,
3054#ifdef CONFIG_PM_SLEEP
3055	.driver.pm = &lan743x_pm_ops,
3056#endif
3057	.shutdown = lan743x_pcidev_shutdown,
3058};
3059
3060module_pci_driver(lan743x_pcidev_driver);
3061
3062MODULE_AUTHOR(DRIVER_AUTHOR);
3063MODULE_DESCRIPTION(DRIVER_DESC);
3064MODULE_LICENSE("GPL");