   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/* Copyright (C) 2018 Microchip Technology Inc. */
   3
   4#include <linux/module.h>
   5#include <linux/pci.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/crc32.h>
   9#include <linux/microchipphy.h>
  10#include <linux/net_tstamp.h>
  11#include <linux/of_mdio.h>
  12#include <linux/of_net.h>
  13#include <linux/phy.h>
  14#include <linux/phy_fixed.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/iopoll.h>
  17#include <linux/crc16.h>
  18#include "lan743x_main.h"
  19#include "lan743x_ethtool.h"
  20
  21#define MMD_ACCESS_ADDRESS	0
  22#define MMD_ACCESS_WRITE	1
  23#define MMD_ACCESS_READ		2
  24#define MMD_ACCESS_READ_INC	3
  25#define PCS_POWER_STATE_DOWN	0x6
  26#define PCS_POWER_STATE_UP	0x4
  27
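    /* Determine whether the SGMII interface is enabled on PCI11x1x parts.
     * Reads the strap register (when the Ethernet config load has started
     * or reset protection is set) or falls back to FPGA_REV, and caches the
     * result in adapter->is_sgmii_en. The config-load register is read under
     * the HS system lock.
     */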
  28static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
  29{
  30	u32 chip_rev;
  31	u32 cfg_load;
  32	u32 hw_cfg;
  33	u32 strap;
  34	int ret;
  35
  36	/* Timeout = 100 (i.e. 1 sec (10 msec * 100)) */
  37	ret = lan743x_hs_syslock_acquire(adapter, 100);
  38	if (ret < 0) {
  39		netif_err(adapter, drv, adapter->netdev,
  40			  "Sys Lock acquire failed ret:%d\n", ret);
  41		return;
  42	}
  43
  44	cfg_load = lan743x_csr_read(adapter, ETH_SYS_CONFIG_LOAD_STARTED_REG);
  45	lan743x_hs_syslock_release(adapter);
  46	hw_cfg = lan743x_csr_read(adapter, HW_CFG);
  47
  48	if (cfg_load & GEN_SYS_LOAD_STARTED_REG_ETH_ ||
  49	    hw_cfg & HW_CFG_RST_PROTECT_) {
  50		strap = lan743x_csr_read(adapter, STRAP_READ);
  51		if (strap & STRAP_READ_SGMII_EN_)
  52			adapter->is_sgmii_en = true;
  53		else
  54			adapter->is_sgmii_en = false;
  55	} else {
  56		chip_rev = lan743x_csr_read(adapter, FPGA_REV);
  57		if (chip_rev) {
  58			if (chip_rev & FPGA_SGMII_OP)
  59				adapter->is_sgmii_en = true;
  60			else
  61				adapter->is_sgmii_en = false;
  62		} else {
  63			adapter->is_sgmii_en = false;
  64		}
  65	}
  66	netif_dbg(adapter, drv, adapter->netdev,
  67		  "SGMII I/F %sable\n", adapter->is_sgmii_en ? "En" : "Dis");
  68}
  69
  70static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
  71{
  72	struct lan743x_csr *csr = &adapter->csr;
  73	u32 id_rev = csr->id_rev;
  74
  75	if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
  76	    ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
  77		return true;
  78	}
  79	return false;
  80}
  81
  82static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  83{
  84	pci_release_selected_regions(adapter->pdev,
  85				     pci_select_bars(adapter->pdev,
  86						     IORESOURCE_MEM));
  87	pci_disable_device(adapter->pdev);
  88}
  89
  90static int lan743x_pci_init(struct lan743x_adapter *adapter,
  91			    struct pci_dev *pdev)
  92{
  93	unsigned long bars = 0;
  94	int ret;
  95
  96	adapter->pdev = pdev;
  97	ret = pci_enable_device_mem(pdev);
  98	if (ret)
  99		goto return_error;
 100
 101	netif_info(adapter, probe, adapter->netdev,
 102		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
 103		   pdev->vendor, pdev->device);
 104	bars = pci_select_bars(pdev, IORESOURCE_MEM);
 105	if (!test_bit(0, &bars))
 106		goto disable_device;
 107
 108	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
 109	if (ret)
 110		goto disable_device;
 111
 112	pci_set_master(pdev);
 113	return 0;
 114
 115disable_device:
 116	pci_disable_device(adapter->pdev);
 117
 118return_error:
 119	return ret;
 120}
 121
 122u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
 123{
 124	return ioread32(&adapter->csr.csr_address[offset]);
 125}
 126
 127void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
 128		       u32 data)
 129{
 130	iowrite32(data, &adapter->csr.csr_address[offset]);
 131}
 132
 133#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
 134
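    /* Light reset: set HW_CFG_LRST_ and poll (every 100 ms, for up to 10 s)
     * until hardware clears it.
     */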
 135static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
 136{
 137	u32 data;
 138
 139	data = lan743x_csr_read(adapter, HW_CFG);
 140	data |= HW_CFG_LRST_;
 141	lan743x_csr_write(adapter, HW_CFG, data);
 142
 143	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
 144				  !(data & HW_CFG_LRST_), 100000, 10000000);
 145}
 146
 147static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
 148					   int offset, u32 bit_mask,
 149					   int target_value, int udelay_min,
 150					   int udelay_max, int count)
 151{
 152	u32 data;
 153
 154	return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
 155					 target_value == !!(data & bit_mask),
 156					 udelay_max, udelay_min * count);
 157}
 158
 159static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
 160				    int offset, u32 bit_mask,
 161				    int target_value, int usleep_min,
 162				    int usleep_max, int count)
 163{
 164	u32 data;
 165
 166	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
 167				  target_value == !!(data & bit_mask),
 168				  usleep_max, usleep_min * count);
 169}
 170
 171static int lan743x_csr_init(struct lan743x_adapter *adapter)
 172{
 173	struct lan743x_csr *csr = &adapter->csr;
 174	resource_size_t bar_start, bar_length;
 175
 176	bar_start = pci_resource_start(adapter->pdev, 0);
 177	bar_length = pci_resource_len(adapter->pdev, 0);
 178	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
 179					bar_start, bar_length);
 180	if (!csr->csr_address)
 181		return -ENOMEM;
 182
 183	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
 184	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
 185	netif_info(adapter, probe, adapter->netdev,
 186		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
 187		   csr->id_rev,	FPGA_REV_GET_MAJOR_(csr->fpga_rev),
 188		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
 189	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev))
 190		return -ENODEV;
 191
 192	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 193	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
 194	case ID_REV_CHIP_REV_A0_:
 195		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
 196		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 197		break;
 198	case ID_REV_CHIP_REV_B0_:
 199		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
 200		break;
 201	}
 202
 203	return lan743x_csr_light_reset(adapter);
 204}
 205
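    /* Handler for the software (general purpose) test interrupt: mask it
     * and wake the waiter in lan743x_intr_test_isr().
     */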
 206static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
 207{
 208	struct lan743x_intr *intr = &adapter->intr;
 209
 210	/* disable the interrupt to prevent repeated re-triggering */
 211	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 212	intr->software_isr_flag = true;
 213	wake_up(&intr->software_isr_wq);
 214}
 215
 216static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
 217{
 218	struct lan743x_tx *tx = context;
 219	struct lan743x_adapter *adapter = tx->adapter;
 220	bool enable_flag = true;
 221
 222	lan743x_csr_read(adapter, INT_EN_SET);
 223	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 224		lan743x_csr_write(adapter, INT_EN_CLR,
 225				  INT_BIT_DMA_TX_(tx->channel_number));
 226	}
 227
 228	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
 229		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
 230		u32 dmac_int_sts;
 231		u32 dmac_int_en;
 232
 233		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 234			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 235		else
 236			dmac_int_sts = ioc_bit;
 237		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 238			dmac_int_en = lan743x_csr_read(adapter,
 239						       DMAC_INT_EN_SET);
 240		else
 241			dmac_int_en = ioc_bit;
 242
 243		dmac_int_en &= ioc_bit;
 244		dmac_int_sts &= dmac_int_en;
 245		if (dmac_int_sts & ioc_bit) {
 246			napi_schedule(&tx->napi);
 247			enable_flag = false;/* poll func will enable later */
 248		}
 249	}
 250
 251	if (enable_flag)
 252		/* enable isr */
 253		lan743x_csr_write(adapter, INT_EN_SET,
 254				  INT_BIT_DMA_TX_(tx->channel_number));
 255}
 256
 257static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
 258{
 259	struct lan743x_rx *rx = context;
 260	struct lan743x_adapter *adapter = rx->adapter;
 261	bool enable_flag = true;
 262
 263	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 264		lan743x_csr_write(adapter, INT_EN_CLR,
 265				  INT_BIT_DMA_RX_(rx->channel_number));
 266	}
 267
 268	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
 269		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
 270		u32 dmac_int_sts;
 271		u32 dmac_int_en;
 272
 273		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 274			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 275		else
 276			dmac_int_sts = rx_frame_bit;
 277		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 278			dmac_int_en = lan743x_csr_read(adapter,
 279						       DMAC_INT_EN_SET);
 280		else
 281			dmac_int_en = rx_frame_bit;
 282
 283		dmac_int_en &= rx_frame_bit;
 284		dmac_int_sts &= dmac_int_en;
 285		if (dmac_int_sts & rx_frame_bit) {
 286			napi_schedule(&rx->napi);
 287			enable_flag = false;/* poll func will enable later */
 288		}
 289	}
 290
 291	if (enable_flag) {
 292		/* enable isr */
 293		lan743x_csr_write(adapter, INT_EN_SET,
 294				  INT_BIT_DMA_RX_(rx->channel_number));
 295	}
 296}
 297
 298static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
 299{
 300	struct lan743x_adapter *adapter = context;
 301	unsigned int channel;
 302
 303	if (int_sts & INT_BIT_ALL_RX_) {
 304		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
 305			channel++) {
 306			u32 int_bit = INT_BIT_DMA_RX_(channel);
 307
 308			if (int_sts & int_bit) {
 309				lan743x_rx_isr(&adapter->rx[channel],
 310					       int_bit, flags);
 311				int_sts &= ~int_bit;
 312			}
 313		}
 314	}
 315	if (int_sts & INT_BIT_ALL_TX_) {
 316		for (channel = 0; channel < adapter->used_tx_channels;
 317			channel++) {
 318			u32 int_bit = INT_BIT_DMA_TX_(channel);
 319
 320			if (int_sts & int_bit) {
 321				lan743x_tx_isr(&adapter->tx[channel],
 322					       int_bit, flags);
 323				int_sts &= ~int_bit;
 324			}
 325		}
 326	}
 327	if (int_sts & INT_BIT_ALL_OTHER_) {
 328		if (int_sts & INT_BIT_SW_GP_) {
 329			lan743x_intr_software_isr(adapter);
 330			int_sts &= ~INT_BIT_SW_GP_;
 331		}
 332		if (int_sts & INT_BIT_1588_) {
 333			lan743x_ptp_isr(adapter);
 334			int_sts &= ~INT_BIT_1588_;
 335		}
 336	}
 337	if (int_sts)
 338		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
 339}
 340
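    /* Common top-level IRQ handler for every vector. Reads (or derives) the
     * interrupt status, optionally masks the vector and/or master interrupt
     * according to the vector flags, dispatches to the registered per-source
     * handler, then re-enables as configured.
     */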
 341static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
 342{
 343	struct lan743x_vector *vector = ptr;
 344	struct lan743x_adapter *adapter = vector->adapter;
 345	irqreturn_t result = IRQ_NONE;
 346	u32 int_enables;
 347	u32 int_sts;
 348
 349	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
 350		int_sts = lan743x_csr_read(adapter, INT_STS);
 351	} else if (vector->flags &
 352		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
 353		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
 354		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
 355	} else {
 356		/* use mask as implied status */
 357		int_sts = vector->int_mask | INT_BIT_MAS_;
 358	}
 359
 360	if (!(int_sts & INT_BIT_MAS_))
 361		goto irq_done;
 362
 363	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
 364		/* disable vector interrupt */
 365		lan743x_csr_write(adapter,
 366				  INT_VEC_EN_CLR,
 367				  INT_VEC_EN_(vector->vector_index));
 368
 369	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
 370		/* disable master interrupt */
 371		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 372
 373	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
 374		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
 375	} else {
 376		/*  use vector mask as implied enable mask */
 377		int_enables = vector->int_mask;
 378	}
 379
 380	int_sts &= int_enables;
 381	int_sts &= vector->int_mask;
 382	if (int_sts) {
 383		if (vector->handler) {
 384			vector->handler(vector->context,
 385					int_sts, vector->flags);
 386		} else {
 387			/* disable interrupts on this vector */
 388			lan743x_csr_write(adapter, INT_EN_CLR,
 389					  vector->int_mask);
 390		}
 391		result = IRQ_HANDLED;
 392	}
 393
 394	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
 395		/* enable master interrupt */
 396		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 397
 398	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
 399		/* enable vector interrupt */
 400		lan743x_csr_write(adapter,
 401				  INT_VEC_EN_SET,
 402				  INT_VEC_EN_(vector->vector_index));
 403irq_done:
 404	return result;
 405}
 406
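    /* Verify interrupt delivery by firing the software interrupt
     * (INT_BIT_SW_GP_) and waiting up to 200 ms for the ISR to observe it.
     */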
 407static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
 408{
 409	struct lan743x_intr *intr = &adapter->intr;
 410	int ret;
 411
 412	intr->software_isr_flag = false;
 413
 414	/* enable and activate test interrupt */
 415	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
 416	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
 417
 418	ret = wait_event_timeout(intr->software_isr_wq,
 419				 intr->software_isr_flag,
 420				 msecs_to_jiffies(200));
 421
 422	/* disable test interrupt */
 423	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 424
 425	return ret > 0 ? 0 : -ENODEV;
 426}
 427
 428static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
 429				     int vector_index, u32 flags,
 430				     u32 int_mask,
 431				     lan743x_vector_handler handler,
 432				     void *context)
 433{
 434	struct lan743x_vector *vector = &adapter->intr.vector_list
 435					[vector_index];
 436	int ret;
 437
 438	vector->adapter = adapter;
 439	vector->flags = flags;
 440	vector->vector_index = vector_index;
 441	vector->int_mask = int_mask;
 442	vector->handler = handler;
 443	vector->context = context;
 444
 445	ret = request_irq(vector->irq,
 446			  lan743x_intr_entry_isr,
 447			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
 448			  IRQF_SHARED : 0, DRIVER_NAME, vector);
 449	if (ret) {
 450		vector->handler = NULL;
 451		vector->context = NULL;
 452		vector->int_mask = 0;
 453		vector->flags = 0;
 454	}
 455	return ret;
 456}
 457
 458static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
 459					int vector_index)
 460{
 461	struct lan743x_vector *vector = &adapter->intr.vector_list
 462					[vector_index];
 463
 464	free_irq(vector->irq, vector);
 465	vector->handler = NULL;
 466	vector->context = NULL;
 467	vector->int_mask = 0;
 468	vector->flags = 0;
 469}
 470
 471static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
 472					 u32 int_mask)
 473{
 474	int index;
 475
 476	for (index = 0; index < adapter->max_vector_count; index++) {
 477		if (adapter->intr.vector_list[index].int_mask & int_mask)
 478			return adapter->intr.vector_list[index].flags;
 479	}
 480	return 0;
 481}
 482
 483static void lan743x_intr_close(struct lan743x_adapter *adapter)
 484{
 485	struct lan743x_intr *intr = &adapter->intr;
 486	int index = 0;
 487
 488	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 489	if (adapter->is_pci11x1x)
 490		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
 491	else
 492		lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
 493
 494	for (index = 0; index < intr->number_of_vectors; index++) {
 495		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
 496			lan743x_intr_unregister_isr(adapter, index);
 497			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
 498		}
 499	}
 500
 501	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
 502		pci_disable_msi(adapter->pdev);
 503		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
 504	}
 505
 506	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
 507		pci_disable_msix(adapter->pdev);
 508		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
 509	}
 510}
 511
 512static int lan743x_intr_open(struct lan743x_adapter *adapter)
 513{
 514	struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
 515	struct lan743x_intr *intr = &adapter->intr;
 516	unsigned int used_tx_channels;
 517	u32 int_vec_en_auto_clr = 0;
 518	u8 max_vector_count;
 519	u32 int_vec_map0 = 0;
 520	u32 int_vec_map1 = 0;
 521	int ret = -ENODEV;
 522	int index = 0;
 523	u32 flags = 0;
 524
 525	intr->number_of_vectors = 0;
 526
 527	/* Try to set up MSIX interrupts */
 528	max_vector_count = adapter->max_vector_count;
 529	memset(&msix_entries[0], 0,
 530	       sizeof(struct msix_entry) * max_vector_count);
 531	for (index = 0; index < max_vector_count; index++)
 532		msix_entries[index].entry = index;
 533	used_tx_channels = adapter->used_tx_channels;
 534	ret = pci_enable_msix_range(adapter->pdev,
 535				    msix_entries, 1,
 536				    1 + used_tx_channels +
 537				    LAN743X_USED_RX_CHANNELS);
 538
 539	if (ret > 0) {
 540		intr->flags |= INTR_FLAG_MSIX_ENABLED;
 541		intr->number_of_vectors = ret;
 542		intr->using_vectors = true;
 543		for (index = 0; index < intr->number_of_vectors; index++)
 544			intr->vector_list[index].irq = msix_entries
 545						       [index].vector;
 546		netif_info(adapter, ifup, adapter->netdev,
 547			   "using MSIX interrupts, number of vectors = %d\n",
 548			   intr->number_of_vectors);
 549	}
 550
 551	/* If MSIX failed, try to set up MSI interrupts */
 552	if (!intr->number_of_vectors) {
 553		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 554			if (!pci_enable_msi(adapter->pdev)) {
 555				intr->flags |= INTR_FLAG_MSI_ENABLED;
 556				intr->number_of_vectors = 1;
 557				intr->using_vectors = true;
 558				intr->vector_list[0].irq =
 559					adapter->pdev->irq;
 560				netif_info(adapter, ifup, adapter->netdev,
 561					   "using MSI interrupts, number of vectors = %d\n",
 562					   intr->number_of_vectors);
 563			}
 564		}
 565	}
 566
 567	/* If both MSIX and MSI failed, set up a legacy interrupt */
 568	if (!intr->number_of_vectors) {
 569		intr->number_of_vectors = 1;
 570		intr->using_vectors = false;
 571		intr->vector_list[0].irq = intr->irq;
 572		netif_info(adapter, ifup, adapter->netdev,
 573			   "using legacy interrupts\n");
 574	}
 575
 576	/* At this point we must have at least one irq */
 577	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
 578
 579	/* map all interrupts to vector 0 */
 580	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
 581	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
 582	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
 583	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 584		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 585		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 586		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 587
 588	if (intr->using_vectors) {
 589		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 590			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 591	} else {
 592		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
 593			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
 594			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
 595	}
 596
 597	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 598		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
 599		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
 600		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 601		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
 602		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
 603		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
 604	}
 605
 606	init_waitqueue_head(&intr->software_isr_wq);
 607
 608	ret = lan743x_intr_register_isr(adapter, 0, flags,
 609					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
 610					INT_BIT_ALL_OTHER_,
 611					lan743x_intr_shared_isr, adapter);
 612	if (ret)
 613		goto clean_up;
 614	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
 615
 616	if (intr->using_vectors)
 617		lan743x_csr_write(adapter, INT_VEC_EN_SET,
 618				  INT_VEC_EN_(0));
 619
 620	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 621		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
 622		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
 623		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
 624		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
 625		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
 626		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
 627		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
 628		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
 629		if (adapter->is_pci11x1x) {
 630			lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
 631			lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
 632			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
 633			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
 634		} else {
 635			lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
 636			lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
 637		}
 638		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
 639	}
 640
 641	/* enable interrupts */
 642	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 643	ret = lan743x_intr_test_isr(adapter);
 644	if (ret)
 645		goto clean_up;
 646
 647	if (intr->number_of_vectors > 1) {
 648		int number_of_tx_vectors = intr->number_of_vectors - 1;
 649
 650		if (number_of_tx_vectors > used_tx_channels)
 651			number_of_tx_vectors = used_tx_channels;
 652		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 653			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 654			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 655			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 656			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 657			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 658
 659		if (adapter->csr.flags &
 660		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 661			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 662				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 663				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 664				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 665		}
 666
 667		for (index = 0; index < number_of_tx_vectors; index++) {
 668			u32 int_bit = INT_BIT_DMA_TX_(index);
 669			int vector = index + 1;
 670
 671			/* map TX interrupt to vector */
 672			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 673			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
 674
 675			/* Remove TX interrupt from shared mask */
 676			intr->vector_list[0].int_mask &= ~int_bit;
 677			ret = lan743x_intr_register_isr(adapter, vector, flags,
 678							int_bit, lan743x_tx_isr,
 679							&adapter->tx[index]);
 680			if (ret)
 681				goto clean_up;
 682			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 683			if (!(flags &
 684			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
 685				lan743x_csr_write(adapter, INT_VEC_EN_SET,
 686						  INT_VEC_EN_(vector));
 687		}
 688	}
 689	if ((intr->number_of_vectors - used_tx_channels) > 1) {
 690		int number_of_rx_vectors = intr->number_of_vectors -
 691						used_tx_channels - 1;
 692
 693		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
 694			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
 695
 696		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 697			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 698			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 699			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 700			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 701			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 702
 703		if (adapter->csr.flags &
 704		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 705			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
 706				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 707				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 708				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 709				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 710		}
 711		for (index = 0; index < number_of_rx_vectors; index++) {
 712			int vector = index + 1 + used_tx_channels;
 713			u32 int_bit = INT_BIT_DMA_RX_(index);
 714
 715			/* map RX interrupt to vector */
 716			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
 717			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
 718			if (flags &
 719			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
 720				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
 721				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
 722						  int_vec_en_auto_clr);
 723			}
 724
 725			/* Remove RX interrupt from shared mask */
 726			intr->vector_list[0].int_mask &= ~int_bit;
 727			ret = lan743x_intr_register_isr(adapter, vector, flags,
 728							int_bit, lan743x_rx_isr,
 729							&adapter->rx[index]);
 730			if (ret)
 731				goto clean_up;
 732			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 733
 734			lan743x_csr_write(adapter, INT_VEC_EN_SET,
 735					  INT_VEC_EN_(vector));
 736		}
 737	}
 738	return 0;
 739
 740clean_up:
 741	lan743x_intr_close(adapter);
 742	return ret;
 743}
 744
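    /* Write a buffer into the selected internal data-path RAM (e.g. the RFE
     * hash table) one 32-bit word at a time via DP_ADDR/DP_DATA_0, polling
     * DP_SEL_DPRDY_ before starting and after each write command.
     */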
 745static int lan743x_dp_write(struct lan743x_adapter *adapter,
 746			    u32 select, u32 addr, u32 length, u32 *buf)
 747{
 748	u32 dp_sel;
 749	int i;
 750
 751	if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
 752					    1, 40, 100, 100))
 753		return -EIO;
 754	dp_sel = lan743x_csr_read(adapter, DP_SEL);
 755	dp_sel &= ~DP_SEL_MASK_;
 756	dp_sel |= select;
 757	lan743x_csr_write(adapter, DP_SEL, dp_sel);
 758
 759	for (i = 0; i < length; i++) {
 760		lan743x_csr_write(adapter, DP_ADDR, addr + i);
 761		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
 762		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
 763		if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
 764						    DP_SEL_DPRDY_,
 765						    1, 40, 100, 100))
 766			return -EIO;
 767	}
 768
 769	return 0;
 770}
 771
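    /* Build the MAC_MII_ACC command word for a Clause 22 MDIO access:
     * PHY address, register index, read/write direction and the busy bit.
     */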
 772static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
 773{
 774	u32 ret;
 775
 776	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 777		MAC_MII_ACC_PHY_ADDR_MASK_;
 778	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
 779		MAC_MII_ACC_MIIRINDA_MASK_;
 780
 781	if (read)
 782		ret |= MAC_MII_ACC_MII_READ_;
 783	else
 784		ret |= MAC_MII_ACC_MII_WRITE_;
 785	ret |= MAC_MII_ACC_MII_BUSY_;
 786
 787	return ret;
 788}
 789
 790static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
 791{
 792	u32 data;
 793
 794	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
 795				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
 796}
 797
 798static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
 799{
 800	struct lan743x_adapter *adapter = bus->priv;
 801	u32 val, mii_access;
 802	int ret;
 803
 804	/* confirm MII not busy */
 805	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 806	if (ret < 0)
 807		return ret;
 808
 809	/* set the address, index & direction (read from PHY) */
 810	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
 811	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 812	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 813	if (ret < 0)
 814		return ret;
 815
 816	val = lan743x_csr_read(adapter, MAC_MII_DATA);
 817	return (int)(val & 0xFFFF);
 818}
 819
 820static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
 821				     int phy_id, int index, u16 regval)
 822{
 823	struct lan743x_adapter *adapter = bus->priv;
 824	u32 val, mii_access;
 825	int ret;
 826
 827	/* confirm MII not busy */
 828	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 829	if (ret < 0)
 830		return ret;
 831	val = (u32)regval;
 832	lan743x_csr_write(adapter, MAC_MII_DATA, val);
 833
 834	/* set the address, index & direction (write to PHY) */
 835	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
 836	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 837	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 838	return ret;
 839}
 840
 841static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
 842{
 843	u32 ret;
 844
 845	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 846		MAC_MII_ACC_PHY_ADDR_MASK_;
 847	ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
 848		MAC_MII_ACC_MIIMMD_MASK_;
 849	if (op == MMD_ACCESS_WRITE)
 850		ret |= MAC_MII_ACC_MIICMD_WRITE_;
 851	else if (op == MMD_ACCESS_READ)
 852		ret |= MAC_MII_ACC_MIICMD_READ_;
 853	else if (op == MMD_ACCESS_READ_INC)
 854		ret |= MAC_MII_ACC_MIICMD_READ_INC_;
 855	else
 856		ret |= MAC_MII_ACC_MIICMD_ADDR_;
 857	ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
 858
 859	return ret;
 860}
 861
 862static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
 863				    int dev_addr, int index)
 864{
 865	struct lan743x_adapter *adapter = bus->priv;
 866	u32 mmd_access;
 867	int ret;
 868
 869	/* confirm MII not busy */
 870	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 871	if (ret < 0)
 872		return ret;
 873
 874	/* Load Register Address */
 875	lan743x_csr_write(adapter, MAC_MII_DATA, index);
 876	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 877					    MMD_ACCESS_ADDRESS);
 878	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 879	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 880	if (ret < 0)
 881		return ret;
 882
 883	/* Read Data */
 884	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 885					    MMD_ACCESS_READ);
 886	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 887	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 888	if (ret < 0)
 889		return ret;
 890
 891	ret = lan743x_csr_read(adapter, MAC_MII_DATA);
 892	return (int)(ret & 0xFFFF);
 893}
 894
 895static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
 896				     int dev_addr, int index, u16 regval)
 897{
 898	struct lan743x_adapter *adapter = bus->priv;
 899	u32 mmd_access;
 900	int ret;
 901
 902	/* confirm MII not busy */
 903	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 904	if (ret < 0)
 905		return ret;
 906
 907	/* Load Register Address */
 908	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
 909	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 910					    MMD_ACCESS_ADDRESS);
 911	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 912	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 913	if (ret < 0)
 914		return ret;
 915
 916	/* Write Data */
 917	lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
 918	mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
 919					    MMD_ACCESS_WRITE);
 920	lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
 921
 922	return lan743x_mac_mii_wait_till_not_busy(adapter);
 923}
 924
 925static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
 926{
 927	u32 data;
 928	int ret;
 929
 930	ret = readx_poll_timeout(LAN743X_CSR_READ_OP, SGMII_ACC, data,
 931				 !(data & SGMII_ACC_SGMII_BZY_), 100, 1000000);
 932	if (ret < 0)
 933		netif_err(adapter, drv, adapter->netdev,
 934			  "%s: error %d sgmii wait timeout\n", __func__, ret);
 935
 936	return ret;
 937}
 938
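    /* Indirect read of a SGMII/PCS register through SGMII_ACC/SGMII_DATA,
     * serialized by sgmii_rw_lock. Returns the register value or a negative
     * error code; mmd must be <= 31.
     */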
 939int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr)
 940{
 941	u32 mmd_access;
 942	int ret;
 943	u32 val;
 944
 945	if (mmd > 31) {
 946		netif_err(adapter, probe, adapter->netdev,
 947			  "%s mmd should be <= 31\n", __func__);
 948		return -EINVAL;
 949	}
 950
 951	mutex_lock(&adapter->sgmii_rw_lock);
 952	/* Load Register Address */
 953	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
 954	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_);
 955	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
 956	ret = lan743x_sgmii_wait_till_not_busy(adapter);
 957	if (ret < 0)
 958		goto sgmii_unlock;
 959
 960	val = lan743x_csr_read(adapter, SGMII_DATA);
 961	ret = (int)(val & SGMII_DATA_MASK_);
 962
 963sgmii_unlock:
 964	mutex_unlock(&adapter->sgmii_rw_lock);
 965
 966	return ret;
 967}
 968
 969static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
 970			       u8 mmd, u16 addr, u16 val)
 971{
 972	u32 mmd_access;
 973	int ret;
 974
 975	if (mmd > 31) {
 976		netif_err(adapter, probe, adapter->netdev,
 977			  "%s mmd should be <= 31\n", __func__);
 978		return -EINVAL;
 979	}
 980	mutex_lock(&adapter->sgmii_rw_lock);
 981	/* Load Register Data */
 982	lan743x_csr_write(adapter, SGMII_DATA, (u32)(val & SGMII_DATA_MASK_));
 983	/* Load Register Address */
 984	mmd_access = mmd << SGMII_ACC_SGMII_MMD_SHIFT_;
 985	mmd_access |= (addr | SGMII_ACC_SGMII_BZY_ | SGMII_ACC_SGMII_WR_);
 986	lan743x_csr_write(adapter, SGMII_ACC, mmd_access);
 987	ret = lan743x_sgmii_wait_till_not_busy(adapter);
 988	mutex_unlock(&adapter->sgmii_rw_lock);
 989
 990	return ret;
 991}
 992
 993static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
 994				  u16 baud)
 995{
 996	int mpllctrl0;
 997	int mpllctrl1;
 998	int miscctrl1;
 999	int ret;
1000
1001	mpllctrl0 = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1002				       VR_MII_GEN2_4_MPLL_CTRL0);
1003	if (mpllctrl0 < 0)
1004		return mpllctrl0;
1005
1006	mpllctrl0 &= ~VR_MII_MPLL_CTRL0_USE_REFCLK_PAD_;
1007	if (baud == VR_MII_BAUD_RATE_1P25GBPS) {
1008		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_100;
1009		/* mpll_baud_clk/4 */
1010		miscctrl1 = 0xA;
1011	} else {
1012		mpllctrl1 = VR_MII_MPLL_MULTIPLIER_125;
1013		/* mpll_baud_clk/2 */
1014		miscctrl1 = 0x5;
1015	}
1016
1017	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1018				  VR_MII_GEN2_4_MPLL_CTRL0, mpllctrl0);
1019	if (ret < 0)
1020		return ret;
1021
1022	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1023				  VR_MII_GEN2_4_MPLL_CTRL1, mpllctrl1);
1024	if (ret < 0)
1025		return ret;
1026
1027	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1028				  VR_MII_GEN2_4_MISC_CTRL1, miscctrl1);
1029}
1030
1031static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
1032				       bool enable)
1033{
1034	if (enable)
1035		return lan743x_sgmii_mpll_set(adapter,
1036					      VR_MII_BAUD_RATE_3P125GBPS);
1037	else
1038		return lan743x_sgmii_mpll_set(adapter,
1039					      VR_MII_BAUD_RATE_1P25GBPS);
1040}
1041
1042static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
1043				      bool *status)
1044{
1045	int ret;
1046
1047	ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1048				 VR_MII_GEN2_4_MPLL_CTRL1);
1049	if (ret < 0)
1050		return ret;
1051
1052	if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
1053	    ret == VR_MII_MPLL_MULTIPLIER_50)
1054		*status = true;
1055	else
1056		*status = false;
1057
1058	return 0;
1059}
1060
1061static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
1062{
1063	enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
1064	int mii_ctrl;
1065	int dgt_ctrl;
1066	int an_ctrl;
1067	int ret;
1068
1069	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE)
1070		/* Switch to 2.5 Gbps */
1071		ret = lan743x_sgmii_2_5G_mode_set(adapter, true);
1072	else
1073		/* Switch to 10/100/1000 Mbps clock */
1074		ret = lan743x_sgmii_2_5G_mode_set(adapter, false);
1075	if (ret < 0)
1076		return ret;
1077
1078	/* Enable SGMII Auto NEG */
1079	mii_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
1080	if (mii_ctrl < 0)
1081		return mii_ctrl;
1082
1083	an_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, VR_MII_AN_CTRL);
1084	if (an_ctrl < 0)
1085		return an_ctrl;
1086
1087	dgt_ctrl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1088				      VR_MII_DIG_CTRL1);
1089	if (dgt_ctrl < 0)
1090		return dgt_ctrl;
1091
1092	if (lsd == LINK_2500_MASTER || lsd == LINK_2500_SLAVE) {
1093		mii_ctrl &= ~(BMCR_ANENABLE | BMCR_ANRESTART | BMCR_SPEED100);
1094		mii_ctrl |= BMCR_SPEED1000;
1095		dgt_ctrl |= VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
1096		dgt_ctrl &= ~VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
1097		/* In order for Auto-Negotiation to operate properly at
1098		 * 2.5 Gbps, the 1.6 ms link timer values must be adjusted:
1099		 * the VR_MII_LINK_TIMER_CTRL register must be set to
1100		 * 16'h7A1 and the CL37_TMR_OVR_RIDE bit of the
1101		 * VR_MII_DIG_CTRL1 register set to 1.
1102		 */
1103		ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1104					  VR_MII_LINK_TIMER_CTRL, 0x7A1);
1105		if (ret < 0)
1106			return ret;
1107	} else {
1108		mii_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1109		an_ctrl &= ~VR_MII_AN_CTRL_SGMII_LINK_STS_;
1110		dgt_ctrl &= ~VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE_;
1111		dgt_ctrl |= VR_MII_DIG_CTRL1_MAC_AUTO_SW_;
1112	}
1113
1114	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR,
1115				  mii_ctrl);
1116	if (ret < 0)
1117		return ret;
1118
1119	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1120				  VR_MII_DIG_CTRL1, dgt_ctrl);
1121	if (ret < 0)
1122		return ret;
1123
1124	return lan743x_sgmii_write(adapter, MDIO_MMD_VEND2,
1125				  VR_MII_AN_CTRL, an_ctrl);
1126}
1127
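    /* Poll VR_MII_DIG_STS until the PCS power sequencing state machine
     * reaches the requested state, retrying up to 10 times (1-2 ms apart).
     */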
1128static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
1129{
1130	u8 wait_cnt = 0;
1131	u32 dig_sts;
1132
1133	do {
1134		dig_sts = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
1135					     VR_MII_DIG_STS);
1136		if (((dig_sts & VR_MII_DIG_STS_PSEQ_STATE_MASK_) >>
1137		      VR_MII_DIG_STS_PSEQ_STATE_POS_) == state)
1138			break;
1139		usleep_range(1000, 2000);
1140	} while (wait_cnt++ < 10);
1141
1142	if (wait_cnt >= 10)
1143		return -ETIMEDOUT;
1144
1145	return 0;
1146}
1147
1148static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
1149{
1150	struct net_device *netdev = adapter->netdev;
1151	struct phy_device *phydev = netdev->phydev;
1152	enum lan743x_sgmii_lsd lsd = POWER_DOWN;
1153	int mii_ctl;
1154	bool status;
1155	int ret;
1156
1157	switch (phydev->speed) {
1158	case SPEED_2500:
1159		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
1160			lsd = LINK_2500_MASTER;
1161		else
1162			lsd = LINK_2500_SLAVE;
1163		break;
1164	case SPEED_1000:
1165		if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
1166			lsd = LINK_1000_MASTER;
1167		else
1168			lsd = LINK_1000_SLAVE;
1169		break;
1170	case SPEED_100:
1171		if (phydev->duplex)
1172			lsd = LINK_100FD;
1173		else
1174			lsd = LINK_100HD;
1175		break;
1176	case SPEED_10:
1177		if (phydev->duplex)
1178			lsd = LINK_10FD;
1179		else
1180			lsd = LINK_10HD;
1181		break;
1182	default:
1183		netif_err(adapter, drv, adapter->netdev,
1184			  "Invalid speed %d\n", phydev->speed);
1185		return -EINVAL;
1186	}
1187
1188	adapter->sgmii_lsd = lsd;
1189	ret = lan743x_sgmii_aneg_update(adapter);
1190	if (ret < 0) {
1191		netif_err(adapter, drv, adapter->netdev,
1192			  "error %d SGMII cfg failed\n", ret);
1193		return ret;
1194	}
1195
1196	ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
1197	if (ret < 0) {
1198		netif_err(adapter, drv, adapter->netdev,
1199			  "error %d SGMII get mode failed\n", ret);
1200		return ret;
1201	}
1202
1203	if (status)
1204		netif_dbg(adapter, drv, adapter->netdev,
1205			  "SGMII 2.5G mode enabled\n");
1206	else
1207		netif_dbg(adapter, drv, adapter->netdev,
1208			  "SGMII 1G mode enabled\n");
1209
1210	/* SGMII/1000/2500BASE-X PCS power down */
1211	mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
1212	if (mii_ctl < 0)
1213		return mii_ctl;
1214
1215	mii_ctl |= BMCR_PDOWN;
1216	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
1217	if (ret < 0)
1218		return ret;
1219
1220	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_DOWN);
1221	if (ret < 0)
1222		return ret;
1223
1224	/* SGMII/1000/2500BASE-X PCS power up */
1225	mii_ctl &= ~BMCR_PDOWN;
1226	ret = lan743x_sgmii_write(adapter, MDIO_MMD_VEND2, MII_BMCR, mii_ctl);
1227	if (ret < 0)
1228		return ret;
1229
1230	ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
1231	if (ret < 0)
1232		return ret;
1233
1234	return 0;
1235}
1236
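    /* Program the MAC address into MAC_RX_ADDRL/MAC_RX_ADDRH and cache it
     * in adapter->mac_address.
     */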
1237static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
1238				    u8 *addr)
1239{
1240	u32 addr_lo, addr_hi;
1241
1242	addr_lo = addr[0] |
1243		addr[1] << 8 |
1244		addr[2] << 16 |
1245		addr[3] << 24;
1246	addr_hi = addr[4] |
1247		addr[5] << 8;
1248	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
1249	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
1250
1251	ether_addr_copy(adapter->mac_address, addr);
1252	netif_info(adapter, drv, adapter->netdev,
1253		   "MAC address set to %pM\n", addr);
1254}
1255
1256static int lan743x_mac_init(struct lan743x_adapter *adapter)
1257{
1258	bool mac_address_valid = true;
1259	struct net_device *netdev;
1260	u32 mac_addr_hi = 0;
1261	u32 mac_addr_lo = 0;
1262	u32 data;
1263
1264	netdev = adapter->netdev;
1265
1266	/* disable auto duplex, and speed detection. Phylib does that */
1267	data = lan743x_csr_read(adapter, MAC_CR);
1268	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
1269	data |= MAC_CR_CNTR_RST_;
1270	lan743x_csr_write(adapter, MAC_CR, data);
1271
1272	if (!is_valid_ether_addr(adapter->mac_address)) {
1273		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
1274		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
1275		adapter->mac_address[0] = mac_addr_lo & 0xFF;
1276		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
1277		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
1278		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
1279		adapter->mac_address[4] = mac_addr_hi & 0xFF;
1280		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
1281
1282		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
1283		    mac_addr_lo == 0xFFFFFFFF) {
1284			mac_address_valid = false;
1285		} else if (!is_valid_ether_addr(adapter->mac_address)) {
1286			mac_address_valid = false;
1287		}
1288
1289		if (!mac_address_valid)
1290			eth_random_addr(adapter->mac_address);
1291	}
1292	lan743x_mac_set_address(adapter, adapter->mac_address);
1293	eth_hw_addr_set(netdev, adapter->mac_address);
1294
1295	return 0;
1296}
1297
1298static int lan743x_mac_open(struct lan743x_adapter *adapter)
1299{
1300	u32 temp;
1301
1302	temp = lan743x_csr_read(adapter, MAC_RX);
1303	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
1304	temp = lan743x_csr_read(adapter, MAC_TX);
1305	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
1306	return 0;
1307}
1308
1309static void lan743x_mac_close(struct lan743x_adapter *adapter)
1310{
1311	u32 temp;
1312
1313	temp = lan743x_csr_read(adapter, MAC_TX);
1314	temp &= ~MAC_TX_TXEN_;
1315	lan743x_csr_write(adapter, MAC_TX, temp);
1316	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
1317				 1, 1000, 20000, 100);
1318
1319	temp = lan743x_csr_read(adapter, MAC_RX);
1320	temp &= ~MAC_RX_RXEN_;
1321	lan743x_csr_write(adapter, MAC_RX, temp);
1322	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
1323				 1, 1000, 20000, 100);
1324}
1325
1326void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
1327				       bool tx_enable, bool rx_enable)
1328{
1329	u32 flow_setting = 0;
1330
1331	/* set maximum pause time because when fifo space frees
1332	 * up, a zero-value pause frame will be sent to release the pause
1333	 */
1334	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
1335	if (tx_enable)
1336		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
1337	if (rx_enable)
1338		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
1339	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
1340}
1341
1342static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
1343{
1344	int enabled = 0;
1345	u32 mac_rx = 0;
1346
1347	mac_rx = lan743x_csr_read(adapter, MAC_RX);
1348	if (mac_rx & MAC_RX_RXEN_) {
1349		enabled = 1;
1350		if (mac_rx & MAC_RX_RXD_) {
1351			lan743x_csr_write(adapter, MAC_RX, mac_rx);
1352			mac_rx &= ~MAC_RX_RXD_;
1353		}
1354		mac_rx &= ~MAC_RX_RXEN_;
1355		lan743x_csr_write(adapter, MAC_RX, mac_rx);
1356		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
1357					 1, 1000, 20000, 100);
1358		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
1359	}
1360
1361	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
1362	mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
1363		  << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1364	lan743x_csr_write(adapter, MAC_RX, mac_rx);
1365
1366	if (enabled) {
1367		mac_rx |= MAC_RX_RXEN_;
1368		lan743x_csr_write(adapter, MAC_RX, mac_rx);
1369	}
1370	return 0;
1371}
1372
1373/* PHY */
1374static int lan743x_phy_reset(struct lan743x_adapter *adapter)
1375{
1376	u32 data;
1377
1378	/* Only called within probe, and before mdiobus_register */
1379
1380	data = lan743x_csr_read(adapter, PMT_CTL);
1381	data |= PMT_CTL_ETH_PHY_RST_;
1382	lan743x_csr_write(adapter, PMT_CTL, data);
1383
1384	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
1385				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
1386				  (data & PMT_CTL_READY_)),
1387				  50000, 1000000);
1388}
1389
1390static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
1391					   u16 local_adv, u16 remote_adv)
1392{
1393	struct lan743x_phy *phy = &adapter->phy;
1394	u8 cap;
1395
1396	if (phy->fc_autoneg)
1397		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
1398	else
1399		cap = phy->fc_request_control;
1400
1401	lan743x_mac_flow_ctrl_set_enables(adapter,
1402					  cap & FLOW_CTRL_TX,
1403					  cap & FLOW_CTRL_RX);
1404}
1405
1406static int lan743x_phy_init(struct lan743x_adapter *adapter)
1407{
1408	return lan743x_phy_reset(adapter);
1409}
1410
1411static void lan743x_phy_link_status_change(struct net_device *netdev)
1412{
1413	struct lan743x_adapter *adapter = netdev_priv(netdev);
1414	struct phy_device *phydev = netdev->phydev;
1415	u32 data;
1416
1417	phy_print_status(phydev);
1418	if (phydev->state == PHY_RUNNING) {
1419		int remote_advertisement = 0;
1420		int local_advertisement = 0;
1421
1422		data = lan743x_csr_read(adapter, MAC_CR);
1423
1424		/* set duplex mode */
1425		if (phydev->duplex)
1426			data |= MAC_CR_DPX_;
1427		else
1428			data &= ~MAC_CR_DPX_;
1429
1430		/* set bus speed */
1431		switch (phydev->speed) {
1432		case SPEED_10:
1433			data &= ~MAC_CR_CFG_H_;
1434			data &= ~MAC_CR_CFG_L_;
1435		break;
1436		case SPEED_100:
1437			data &= ~MAC_CR_CFG_H_;
1438			data |= MAC_CR_CFG_L_;
1439		break;
1440		case SPEED_1000:
1441			data |= MAC_CR_CFG_H_;
1442			data &= ~MAC_CR_CFG_L_;
1443		break;
1444		case SPEED_2500:
1445			data |= MAC_CR_CFG_H_;
1446			data |= MAC_CR_CFG_L_;
1447		break;
1448		}
1449		lan743x_csr_write(adapter, MAC_CR, data);
1450
1451		local_advertisement =
1452			linkmode_adv_to_mii_adv_t(phydev->advertising);
1453		remote_advertisement =
1454			linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
1455
1456		lan743x_phy_update_flowcontrol(adapter, local_advertisement,
1457					       remote_advertisement);
1458		lan743x_ptp_update_latency(adapter, phydev->speed);
1459		if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
1460		    phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
1461		    phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
1462			lan743x_sgmii_config(adapter);
1463	}
1464}
1465
1466static void lan743x_phy_close(struct lan743x_adapter *adapter)
1467{
1468	struct net_device *netdev = adapter->netdev;
1469	struct phy_device *phydev = netdev->phydev;
1470
1471	phy_stop(netdev->phydev);
1472	phy_disconnect(netdev->phydev);
1473
1474	/* using phydev here as phy_disconnect NULLs netdev->phydev */
1475	if (phy_is_pseudo_fixed_link(phydev))
1476		fixed_phy_unregister(phydev);
1477
1478}
1479
1480static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
1481{
1482	u32 id_rev;
1483	u32 data;
1484
1485	data = lan743x_csr_read(adapter, MAC_CR);
1486	id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
1487
1488	if (adapter->is_pci11x1x && adapter->is_sgmii_en)
1489		adapter->phy_interface = PHY_INTERFACE_MODE_SGMII;
1490	else if (id_rev == ID_REV_ID_LAN7430_)
1491		adapter->phy_interface = PHY_INTERFACE_MODE_GMII;
1492	else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_))
1493		adapter->phy_interface = PHY_INTERFACE_MODE_MII;
1494	else
1495		adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
1496}
1497
1498static int lan743x_phy_open(struct lan743x_adapter *adapter)
1499{
1500	struct net_device *netdev = adapter->netdev;
1501	struct lan743x_phy *phy = &adapter->phy;
1502	struct fixed_phy_status fphy_status = {
1503		.link = 1,
1504		.speed = SPEED_1000,
1505		.duplex = DUPLEX_FULL,
1506	};
1507	struct phy_device *phydev;
1508	int ret = -EIO;
1509
1510	/* try devicetree phy, or fixed link */
1511	phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
1512					lan743x_phy_link_status_change);
1513
1514	if (!phydev) {
1515		/* try internal phy */
1516		phydev = phy_find_first(adapter->mdiobus);
1517		if (!phydev)	{
1518			if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
1519					ID_REV_ID_LAN7431_) {
1520				phydev = fixed_phy_register(PHY_POLL,
1521							    &fphy_status, NULL);
1522				if (IS_ERR(phydev)) {
1523					netdev_err(netdev, "No PHY/fixed_PHY found\n");
1524					return PTR_ERR(phydev);
1525				}
1526			} else {
1527				goto return_error;
1528				}
1529		}
1530
1531		lan743x_phy_interface_select(adapter);
1532
1533		ret = phy_connect_direct(netdev, phydev,
1534					 lan743x_phy_link_status_change,
1535					 adapter->phy_interface);
1536		if (ret)
1537			goto return_error;
1538	}
1539
1540	/* MAC doesn't support 1000T Half */
1541	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1542
1543	/* support both flow controls */
1544	phy_support_asym_pause(phydev);
1545	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1546	phy->fc_autoneg = phydev->autoneg;
1547
1548	phy_start(phydev);
1549	phy_start_aneg(phydev);
1550	phy_attached_info(phydev);
1551	return 0;
1552
1553return_error:
1554	return ret;
1555}
1556
1557static void lan743x_rfe_open(struct lan743x_adapter *adapter)
1558{
1559	lan743x_csr_write(adapter, RFE_RSS_CFG,
1560		RFE_RSS_CFG_UDP_IPV6_EX_ |
1561		RFE_RSS_CFG_TCP_IPV6_EX_ |
1562		RFE_RSS_CFG_IPV6_EX_ |
1563		RFE_RSS_CFG_UDP_IPV6_ |
1564		RFE_RSS_CFG_TCP_IPV6_ |
1565		RFE_RSS_CFG_IPV6_ |
1566		RFE_RSS_CFG_UDP_IPV4_ |
1567		RFE_RSS_CFG_TCP_IPV4_ |
1568		RFE_RSS_CFG_IPV4_ |
1569		RFE_RSS_CFG_VALID_HASH_BITS_ |
1570		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
1571		RFE_RSS_CFG_RSS_HASH_STORE_ |
1572		RFE_RSS_CFG_RSS_ENABLE_);
1573}
1574
1575static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1576{
1577	u8 *mac_addr;
1578	u32 mac_addr_hi = 0;
1579	u32 mac_addr_lo = 0;
1580
1581	/* Add mac address to perfect Filter */
1582	mac_addr = adapter->mac_address;
1583	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1584		      (((u32)(mac_addr[1])) << 8) |
1585		      (((u32)(mac_addr[2])) << 16) |
1586		      (((u32)(mac_addr[3])) << 24));
1587	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1588		      (((u32)(mac_addr[5])) << 8));
1589
1590	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1591	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1592			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1593}
1594
1595static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
1596{
1597	struct net_device *netdev = adapter->netdev;
1598	u32 hash_table[DP_SEL_VHF_HASH_LEN];
1599	u32 rfctl;
1600	u32 data;
1601
1602	rfctl = lan743x_csr_read(adapter, RFE_CTL);
1603	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
1604		 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1605	rfctl |= RFE_CTL_AB_;
1606	if (netdev->flags & IFF_PROMISC) {
1607		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
1608	} else {
1609		if (netdev->flags & IFF_ALLMULTI)
1610			rfctl |= RFE_CTL_AM_;
1611	}
1612
1613	if (netdev->features & NETIF_F_RXCSUM)
1614		rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;
1615
1616	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
1617	if (netdev_mc_count(netdev)) {
1618		struct netdev_hw_addr *ha;
1619		int i;
1620
1621		rfctl |= RFE_CTL_DA_PERFECT_;
1622		i = 1;
1623		netdev_for_each_mc_addr(ha, netdev) {
1624			/* set first 32 into Perfect Filter */
1625			if (i < 33) {
1626				lan743x_csr_write(adapter,
1627						  RFE_ADDR_FILT_HI(i), 0);
1628				data = ha->addr[3];
1629				data = ha->addr[2] | (data << 8);
1630				data = ha->addr[1] | (data << 8);
1631				data = ha->addr[0] | (data << 8);
1632				lan743x_csr_write(adapter,
1633						  RFE_ADDR_FILT_LO(i), data);
1634				data = ha->addr[5];
1635				data = ha->addr[4] | (data << 8);
1636				data |= RFE_ADDR_FILT_HI_VALID_;
1637				lan743x_csr_write(adapter,
1638						  RFE_ADDR_FILT_HI(i), data);
1639			} else {
1640				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
1641					     23) & 0x1FF;
1642				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
1643				rfctl |= RFE_CTL_MCAST_HASH_;
1644			}
1645			i++;
1646		}
1647	}
1648
1649	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
1650			 DP_SEL_VHF_VLAN_LEN,
1651			 DP_SEL_VHF_HASH_LEN, hash_table);
1652	lan743x_csr_write(adapter, RFE_CTL, rfctl);
1653}
1654
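    /* Soft-reset the DMA controller (DMAC_CMD_SWR_), then program descriptor
     * spacing, channel arbitration, maximum read request size and the
     * interrupt coalescing/OBFF thresholds.
     */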
1655static int lan743x_dmac_init(struct lan743x_adapter *adapter)
1656{
1657	u32 data = 0;
1658
1659	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
1660	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
1661				 0, 1000, 20000, 100);
1662	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
1663	case DMA_DESCRIPTOR_SPACING_16:
1664		data = DMAC_CFG_MAX_DSPACE_16_;
1665		break;
1666	case DMA_DESCRIPTOR_SPACING_32:
1667		data = DMAC_CFG_MAX_DSPACE_32_;
1668		break;
1669	case DMA_DESCRIPTOR_SPACING_64:
1670		data = DMAC_CFG_MAX_DSPACE_64_;
1671		break;
1672	case DMA_DESCRIPTOR_SPACING_128:
1673		data = DMAC_CFG_MAX_DSPACE_128_;
1674		break;
1675	default:
1676		return -EPERM;
1677	}
1678	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1679		data |= DMAC_CFG_COAL_EN_;
1680	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
1681	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
1682	lan743x_csr_write(adapter, DMAC_CFG, data);
1683	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
1684	data |= DMAC_COAL_CFG_TIMER_TX_START_;
1685	data |= DMAC_COAL_CFG_FLUSH_INTS_;
1686	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
1687	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
1688	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
1689	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
1690	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
1691	data = DMAC_OBFF_TX_THRES_SET_(0x08);
1692	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
1693	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
1694	return 0;
1695}
1696
1697static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1698				     int tx_channel)
1699{
1700	u32 dmac_cmd = 0;
1701
1702	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1703	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1704				      DMAC_CMD_START_T_(tx_channel)),
1705				      (dmac_cmd &
1706				      DMAC_CMD_STOP_T_(tx_channel)));
1707}
1708
1709static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1710					     int tx_channel)
1711{
1712	int timeout = 100;
1713	int result = 0;
1714
1715	while (timeout &&
1716	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1717	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1718		usleep_range(1000, 20000);
1719		timeout--;
1720	}
1721	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1722		result = -ENODEV;
1723	return result;
1724}
1725
1726static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1727				     int rx_channel)
1728{
1729	u32 dmac_cmd = 0;
1730
1731	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1732	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1733				      DMAC_CMD_START_R_(rx_channel)),
1734				      (dmac_cmd &
1735				      DMAC_CMD_STOP_R_(rx_channel)));
1736}
1737
1738static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1739					     int rx_channel)
1740{
1741	int timeout = 100;
1742	int result = 0;
1743
1744	while (timeout &&
1745	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1746	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1747		usleep_range(1000, 20000);
1748		timeout--;
1749	}
1750	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1751		result = -ENODEV;
1752	return result;
1753}
1754
1755static void lan743x_tx_release_desc(struct lan743x_tx *tx,
1756				    int descriptor_index, bool cleanup)
1757{
1758	struct lan743x_tx_buffer_info *buffer_info = NULL;
1759	struct lan743x_tx_descriptor *descriptor = NULL;
1760	u32 descriptor_type = 0;
1761	bool ignore_sync;
1762
1763	descriptor = &tx->ring_cpu_ptr[descriptor_index];
1764	buffer_info = &tx->buffer_info[descriptor_index];
1765	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
1766		goto done;
1767
1768	descriptor_type = le32_to_cpu(descriptor->data0) &
1769			  TX_DESC_DATA0_DTYPE_MASK_;
1770	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
1771		goto clean_up_data_descriptor;
1772	else
1773		goto clear_active;
1774
1775clean_up_data_descriptor:
1776	if (buffer_info->dma_ptr) {
1777		if (buffer_info->flags &
1778		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
1779			dma_unmap_page(&tx->adapter->pdev->dev,
1780				       buffer_info->dma_ptr,
1781				       buffer_info->buffer_length,
1782				       DMA_TO_DEVICE);
1783		} else {
1784			dma_unmap_single(&tx->adapter->pdev->dev,
1785					 buffer_info->dma_ptr,
1786					 buffer_info->buffer_length,
1787					 DMA_TO_DEVICE);
1788		}
1789		buffer_info->dma_ptr = 0;
1790		buffer_info->buffer_length = 0;
1791	}
1792	if (!buffer_info->skb)
1793		goto clear_active;
1794
1795	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
1796		dev_kfree_skb_any(buffer_info->skb);
1797		goto clear_skb;
1798	}
1799
1800	if (cleanup) {
1801		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
1802		dev_kfree_skb_any(buffer_info->skb);
1803	} else {
1804		ignore_sync = (buffer_info->flags &
1805			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
1806		lan743x_ptp_tx_timestamp_skb(tx->adapter,
1807					     buffer_info->skb, ignore_sync);
1808	}
1809
1810clear_skb:
1811	buffer_info->skb = NULL;
1812
1813clear_active:
1814	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
1815
1816done:
1817	memset(buffer_info, 0, sizeof(*buffer_info));
1818	memset(descriptor, 0, sizeof(*descriptor));
1819}
1820
1821static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1822{
1823	return ((++index) % tx->ring_size);
1824}
1825
1826static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1827{
1828	while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
1829		lan743x_tx_release_desc(tx, tx->last_head, false);
1830		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1831	}
1832}
1833
1834static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1835{
1836	u32 original_head = 0;
1837
1838	original_head = tx->last_head;
1839	do {
1840		lan743x_tx_release_desc(tx, tx->last_head, true);
1841		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1842	} while (tx->last_head != original_head);
1843	memset(tx->ring_cpu_ptr, 0,
1844	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1845	memset(tx->buffer_info, 0,
1846	       sizeof(*tx->buffer_info) * (tx->ring_size));
1847}
1848
1849static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1850				   struct sk_buff *skb)
1851{
1852	int result = 1; /* 1 for the main skb buffer */
1853	int nr_frags = 0;
1854
1855	if (skb_is_gso(skb))
1856		result++; /* requires an extension descriptor */
1857	nr_frags = skb_shinfo(skb)->nr_frags;
1858	result += nr_frags; /* 1 for each fragment buffer */
1859	return result;
1860}
1861
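/* Number of free descriptors between last_tail and last_head. One slot is
 * always left unused so that a completely full ring is never confused with
 * an empty one.
 */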
1862static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1863{
1864	int last_head = tx->last_head;
1865	int last_tail = tx->last_tail;
1866
1867	if (last_tail >= last_head)
1868		return tx->ring_size - last_tail + last_head - 1;
1869	else
1870		return last_head - last_tail - 1;
1871}
1872
1873static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter,
1874					   int rx_ts_config)
1875{
1876	int channel_number;
1877	int index;
1878	u32 data;
1879
1880	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
1881		channel_number = adapter->rx[index].channel_number;
1882		data = lan743x_csr_read(adapter, RX_CFG_B(channel_number));
1883		data &= RX_CFG_B_TS_MASK_;
1884		data |= rx_ts_config;
1885		lan743x_csr_write(adapter, RX_CFG_B(channel_number),
1886				  data);
1887	}
1888}
1889
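/* Map an hwtstamp rx_filter value onto the per-channel RX_CFG_B timestamp
 * mode and, for PTP v2 events, the PTP_RX_TS_CFG event-message mask.
 * Returns -ERANGE for unsupported filters.
 */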
1890int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
1891			       int rx_filter)
1892{
1893	u32 data;
1894
1895	switch (rx_filter) {
1896	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1897		lan743x_rx_cfg_b_tstamp_config(adapter,
1898					       RX_CFG_B_TS_DESCR_EN_);
1899		data = lan743x_csr_read(adapter, PTP_RX_TS_CFG);
1900		data |= PTP_RX_TS_CFG_EVENT_MSGS_;
1901		lan743x_csr_write(adapter, PTP_RX_TS_CFG, data);
1902		break;
1903	case HWTSTAMP_FILTER_NONE:
1904		lan743x_rx_cfg_b_tstamp_config(adapter,
1905					       RX_CFG_B_TS_NONE_);
1906		break;
1907	case HWTSTAMP_FILTER_ALL:
1908		lan743x_rx_cfg_b_tstamp_config(adapter,
1909					       RX_CFG_B_TS_ALL_RX_);
1910		break;
1911	default:
1912		return -ERANGE;
1913	}
1914	return 0;
1915}
1916
1917void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
1918				      bool enable_timestamping,
1919				      bool enable_onestep_sync)
1920{
1921	if (enable_timestamping)
1922		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
1923	else
1924		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
1925	if (enable_onestep_sync)
1926		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
1927	else
1928		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
1929}
1930
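/* Start assembling a frame: map the skb head for DMA, fill the address and
 * length words of the first data descriptor and stage its flags (FS, FCS,
 * optional timestamp and checksum offload bits) in tx->frame_data0. The
 * descriptor's data0 word is only written once the next buffer or the end
 * of the frame is known.
 */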
1931static int lan743x_tx_frame_start(struct lan743x_tx *tx,
1932				  unsigned char *first_buffer,
1933				  unsigned int first_buffer_length,
1934				  unsigned int frame_length,
1935				  bool time_stamp,
1936				  bool check_sum)
1937{
1938	/* called only from within lan743x_tx_xmit_frame.
1939	 * assuming tx->ring_lock has already been acquired.
1940	 */
1941	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1942	struct lan743x_tx_buffer_info *buffer_info = NULL;
1943	struct lan743x_adapter *adapter = tx->adapter;
1944	struct device *dev = &adapter->pdev->dev;
1945	dma_addr_t dma_ptr;
1946
1947	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
1948	tx->frame_first = tx->last_tail;
1949	tx->frame_tail = tx->frame_first;
1950
1951	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1952	buffer_info = &tx->buffer_info[tx->frame_tail];
1953	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
1954				 DMA_TO_DEVICE);
1955	if (dma_mapping_error(dev, dma_ptr))
1956		return -ENOMEM;
1957
1958	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
1959	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
1960	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
1961		TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
1962
1963	buffer_info->skb = NULL;
1964	buffer_info->dma_ptr = dma_ptr;
1965	buffer_info->buffer_length = first_buffer_length;
1966	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1967
1968	tx->frame_data0 = (first_buffer_length &
1969		TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1970		TX_DESC_DATA0_DTYPE_DATA_ |
1971		TX_DESC_DATA0_FS_ |
1972		TX_DESC_DATA0_FCS_;
1973	if (time_stamp)
1974		tx->frame_data0 |= TX_DESC_DATA0_TSE_;
1975
1976	if (check_sum)
1977		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
1978				   TX_DESC_DATA0_IPE_ |
1979				   TX_DESC_DATA0_TPE_;
1980
1981	/* data0 will be programmed in one of the other frame assembler functions */
1982	return 0;
1983}
1984
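/* Insert the LSO extension descriptor: close out the previous data
 * descriptor (marking it last/interrupt-on-completion when the skb has no
 * fragments), then stage an extension descriptor that carries the total
 * payload length for segmentation offload.
 */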
1985static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
1986				     unsigned int frame_length,
1987				     int nr_frags)
1988{
1989	/* called only from within lan743x_tx_xmit_frame.
1990	 * assuming tx->ring_lock has already been acquired.
1991	 */
1992	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1993	struct lan743x_tx_buffer_info *buffer_info = NULL;
1994
1995	/* wrap up previous descriptor */
1996	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
1997	if (nr_frags <= 0) {
1998		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1999		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
2000	}
2001	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
2002	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
2003
2004	/* move to next descriptor */
2005	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
2006	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
2007	buffer_info = &tx->buffer_info[tx->frame_tail];
2008
2009	/* add extension descriptor */
2010	tx_descriptor->data1 = 0;
2011	tx_descriptor->data2 = 0;
2012	tx_descriptor->data3 = 0;
2013
2014	buffer_info->skb = NULL;
2015	buffer_info->dma_ptr = 0;
2016	buffer_info->buffer_length = 0;
2017	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
2018
2019	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
2020			  TX_DESC_DATA0_DTYPE_EXT_ |
2021			  TX_DESC_DATA0_EXT_LSO_;
2022
2023	/* data0 will be programmed in one of the other frame assembler functions */
2024}
2025
2026static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
2027					 const skb_frag_t *fragment,
2028					 unsigned int frame_length)
2029{
2030	/* called only from within lan743x_tx_xmit_frame
2031	 * assuming tx->ring_lock has already been acquired
2032	 */
2033	struct lan743x_tx_descriptor *tx_descriptor = NULL;
2034	struct lan743x_tx_buffer_info *buffer_info = NULL;
2035	struct lan743x_adapter *adapter = tx->adapter;
2036	struct device *dev = &adapter->pdev->dev;
2037	unsigned int fragment_length = 0;
2038	dma_addr_t dma_ptr;
2039
2040	fragment_length = skb_frag_size(fragment);
2041	if (!fragment_length)
2042		return 0;
2043
2044	/* wrap up previous descriptor */
2045	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
2046	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
2047
2048	/* move to next descriptor */
2049	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
2050	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
2051	buffer_info = &tx->buffer_info[tx->frame_tail];
2052	dma_ptr = skb_frag_dma_map(dev, fragment,
2053				   0, fragment_length,
2054				   DMA_TO_DEVICE);
2055	if (dma_mapping_error(dev, dma_ptr)) {
2056		int desc_index;
2057
2058		/* cleanup all previously setup descriptors */
2059		desc_index = tx->frame_first;
2060		while (desc_index != tx->frame_tail) {
2061			lan743x_tx_release_desc(tx, desc_index, true);
2062			desc_index = lan743x_tx_next_index(tx, desc_index);
2063		}
2064		dma_wmb();
2065		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
2066		tx->frame_first = 0;
2067		tx->frame_data0 = 0;
2068		tx->frame_tail = 0;
2069		return -ENOMEM;
2070	}
2071
2072	tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
2073	tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
2074	tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
2075			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
2076
2077	buffer_info->skb = NULL;
2078	buffer_info->dma_ptr = dma_ptr;
2079	buffer_info->buffer_length = fragment_length;
2080	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
2081	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
2082
2083	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
2084			  TX_DESC_DATA0_DTYPE_DATA_ |
2085			  TX_DESC_DATA0_FCS_;
2086
2087	/* data0 will be programmed in one of the other frame assembler functions */
2088	return 0;
2089}
2090
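/* Finish the frame: mark the final data descriptor as last and
 * interrupt-on-completion, record the skb and its timestamp flags in
 * buffer_info, then publish the descriptors with a DMA write barrier and
 * bump TX_TAIL to hand the frame to the DMA controller.
 */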
2091static void lan743x_tx_frame_end(struct lan743x_tx *tx,
2092				 struct sk_buff *skb,
2093				 bool time_stamp,
2094				 bool ignore_sync)
2095{
2096	/* called only from within lan743x_tx_xmit_frame
2097	 * assuming tx->ring_lock has already been acquired
2098	 */
2099	struct lan743x_tx_descriptor *tx_descriptor = NULL;
2100	struct lan743x_tx_buffer_info *buffer_info = NULL;
2101	struct lan743x_adapter *adapter = tx->adapter;
2102	u32 tx_tail_flags = 0;
2103
2104	/* wrap up previous descriptor */
2105	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
2106	    TX_DESC_DATA0_DTYPE_DATA_) {
2107		tx->frame_data0 |= TX_DESC_DATA0_LS_;
2108		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
2109	}
2110
2111	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
2112	buffer_info = &tx->buffer_info[tx->frame_tail];
2113	buffer_info->skb = skb;
2114	if (time_stamp)
2115		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
2116	if (ignore_sync)
2117		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
2118
2119	tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
2120	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
2121	tx->last_tail = tx->frame_tail;
2122
2123	dma_wmb();
2124
2125	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2126		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
2127	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
2128		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
2129		TX_TAIL_SET_TOP_INT_EN_;
2130
2131	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
2132			  tx_tail_flags | tx->frame_tail);
2133	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
2134}
2135
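/* Transmit entry point, called from the ndo_start_xmit handler under
 * tx->ring_lock. If the ring cannot hold the required descriptors, the
 * queue is stopped and NETDEV_TX_BUSY returned (oversized requests are
 * dropped instead); otherwise descriptors are built for the skb head, an
 * optional LSO extension and any page fragments.
 */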
2136static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
2137					 struct sk_buff *skb)
2138{
2139	int required_number_of_descriptors = 0;
2140	unsigned int start_frame_length = 0;
2141	netdev_tx_t retval = NETDEV_TX_OK;
2142	unsigned int frame_length = 0;
2143	unsigned int head_length = 0;
2144	unsigned long irq_flags = 0;
2145	bool do_timestamp = false;
2146	bool ignore_sync = false;
2147	struct netdev_queue *txq;
2148	int nr_frags = 0;
2149	bool gso = false;
2150	int j;
2151
2152	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
2153
2154	spin_lock_irqsave(&tx->ring_lock, irq_flags);
2155	if (required_number_of_descriptors >
2156		lan743x_tx_get_avail_desc(tx)) {
2157		if (required_number_of_descriptors > (tx->ring_size - 1)) {
2158			dev_kfree_skb_irq(skb);
2159		} else {
2160			/* save how many descriptors we needed to restart the queue */
2161			tx->rqd_descriptors = required_number_of_descriptors;
2162			retval = NETDEV_TX_BUSY;
2163			txq = netdev_get_tx_queue(tx->adapter->netdev,
2164						  tx->channel_number);
2165			netif_tx_stop_queue(txq);
2166		}
2167		goto unlock;
2168	}
2169
2170	/* space available, transmit skb  */
2171	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2172	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
2173	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
2174		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2175		do_timestamp = true;
2176		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
2177			ignore_sync = true;
2178	}
2179	head_length = skb_headlen(skb);
2180	frame_length = skb_pagelen(skb);
2181	nr_frags = skb_shinfo(skb)->nr_frags;
2182	start_frame_length = frame_length;
2183	gso = skb_is_gso(skb);
2184	if (gso) {
2185		start_frame_length = max(skb_shinfo(skb)->gso_size,
2186					 (unsigned short)8);
2187	}
2188
2189	if (lan743x_tx_frame_start(tx,
2190				   skb->data, head_length,
2191				   start_frame_length,
2192				   do_timestamp,
2193				   skb->ip_summed == CHECKSUM_PARTIAL)) {
2194		dev_kfree_skb_irq(skb);
2195		goto unlock;
2196	}
2197	tx->frame_count++;
2198
2199	if (gso)
2200		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
2201
2202	if (nr_frags <= 0)
2203		goto finish;
2204
2205	for (j = 0; j < nr_frags; j++) {
2206		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
2207
2208		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
2209			/* upon error no need to call
2210			 *	lan743x_tx_frame_end
2211			 * frame assembler clean up was performed inside
2212			 *	lan743x_tx_frame_add_fragment
2213			 */
2214			dev_kfree_skb_irq(skb);
2215			goto unlock;
2216		}
2217	}
2218
2219finish:
2220	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
2221
2222unlock:
2223	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
2224	return retval;
2225}
2226
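/* TX completion NAPI handler: clear the IOC interrupt status (when
 * write-to-clear is in use), release descriptors the hardware has consumed,
 * wake the stopped queue once enough descriptors are free again and finally
 * re-enable the TX interrupt.
 */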
2227static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
2228{
2229	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
2230	struct lan743x_adapter *adapter = tx->adapter;
2231	unsigned long irq_flags = 0;
2232	struct netdev_queue *txq;
2233	u32 ioc_bit = 0;
2234
2235	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
2236	lan743x_csr_read(adapter, DMAC_INT_STS);
2237	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
2238		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
2239	spin_lock_irqsave(&tx->ring_lock, irq_flags);
2240
2241	/* clean up tx ring */
2242	lan743x_tx_release_completed_descriptors(tx);
2243	txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
2244	if (netif_tx_queue_stopped(txq)) {
2245		if (tx->rqd_descriptors) {
2246			if (tx->rqd_descriptors <=
2247			    lan743x_tx_get_avail_desc(tx)) {
2248				tx->rqd_descriptors = 0;
2249				netif_tx_wake_queue(txq);
2250			}
2251		} else {
2252			netif_tx_wake_queue(txq);
2253		}
2254	}
2255	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
2256
2257	if (!napi_complete(napi))
2258		goto done;
2259
2260	/* enable isr */
2261	lan743x_csr_write(adapter, INT_EN_SET,
2262			  INT_BIT_DMA_TX_(tx->channel_number));
2263	lan743x_csr_read(adapter, INT_STS);
2264
2265done:
2266	return 0;
2267}
2268
2269static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
2270{
2271	if (tx->head_cpu_ptr) {
2272		dma_free_coherent(&tx->adapter->pdev->dev,
2273				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
2274				  tx->head_dma_ptr);
2275		tx->head_cpu_ptr = NULL;
2276		tx->head_dma_ptr = 0;
2277	}
2278	kfree(tx->buffer_info);
2279	tx->buffer_info = NULL;
2280
2281	if (tx->ring_cpu_ptr) {
2282		dma_free_coherent(&tx->adapter->pdev->dev,
2283				  tx->ring_allocation_size, tx->ring_cpu_ptr,
2284				  tx->ring_dma_ptr);
2285		tx->ring_allocation_size = 0;
2286		tx->ring_cpu_ptr = NULL;
2287		tx->ring_dma_ptr = 0;
2288	}
2289	tx->ring_size = 0;
2290}
2291
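/* Allocate the TX descriptor ring, its buffer_info array and the DMA
 * head write-back word. A head write-back address that is not 4-byte
 * aligned is treated as an allocation failure.
 */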
2292static int lan743x_tx_ring_init(struct lan743x_tx *tx)
2293{
2294	size_t ring_allocation_size = 0;
2295	void *cpu_ptr = NULL;
2296	dma_addr_t dma_ptr;
2297	int ret = -ENOMEM;
2298
2299	tx->ring_size = LAN743X_TX_RING_SIZE;
2300	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
2301		ret = -EINVAL;
2302		goto cleanup;
2303	}
2304	if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
2305				      DMA_BIT_MASK(64))) {
2306		dev_warn(&tx->adapter->pdev->dev,
2307			 "lan743x_: No suitable DMA available\n");
2308		ret = -ENOMEM;
2309		goto cleanup;
2310	}
2311	ring_allocation_size = ALIGN(tx->ring_size *
2312				     sizeof(struct lan743x_tx_descriptor),
2313				     PAGE_SIZE);
2314	dma_ptr = 0;
2315	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2316				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2317	if (!cpu_ptr) {
2318		ret = -ENOMEM;
2319		goto cleanup;
2320	}
2321
2322	tx->ring_allocation_size = ring_allocation_size;
2323	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
2324	tx->ring_dma_ptr = dma_ptr;
2325
2326	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
2327	if (!cpu_ptr) {
2328		ret = -ENOMEM;
2329		goto cleanup;
2330	}
2331	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
2332	dma_ptr = 0;
2333	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
2334				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
2335				     GFP_KERNEL);
2336	if (!cpu_ptr) {
2337		ret = -ENOMEM;
2338		goto cleanup;
2339	}
2340
2341	tx->head_cpu_ptr = cpu_ptr;
2342	tx->head_dma_ptr = dma_ptr;
2343	if (tx->head_dma_ptr & 0x3) {
2344		ret = -ENOMEM;
2345		goto cleanup;
2346	}
2347
2348	return 0;
2349
2350cleanup:
2351	lan743x_tx_ring_cleanup(tx);
2352	return ret;
2353}
2354
2355static void lan743x_tx_close(struct lan743x_tx *tx)
2356{
2357	struct lan743x_adapter *adapter = tx->adapter;
2358
2359	lan743x_csr_write(adapter,
2360			  DMAC_CMD,
2361			  DMAC_CMD_STOP_T_(tx->channel_number));
2362	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
2363
2364	lan743x_csr_write(adapter,
2365			  DMAC_INT_EN_CLR,
2366			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
2367	lan743x_csr_write(adapter, INT_EN_CLR,
2368			  INT_BIT_DMA_TX_(tx->channel_number));
2369	napi_disable(&tx->napi);
2370	netif_napi_del(&tx->napi);
2371
2372	lan743x_csr_write(adapter, FCT_TX_CTL,
2373			  FCT_TX_CTL_DIS_(tx->channel_number));
2374	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
2375				 FCT_TX_CTL_EN_(tx->channel_number),
2376				 0, 1000, 20000, 100);
2377
2378	lan743x_tx_release_all_descriptors(tx);
2379
2380	tx->rqd_descriptors = 0;
2381
2382	lan743x_tx_ring_cleanup(tx);
2383}
2384
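/* Bring up one TX channel: reset and enable its FIFO, soft-reset the DMA
 * channel, program the ring base, ring length and head write-back
 * addresses, register the NAPI handler and finally enable interrupts and
 * start the DMAC channel.
 */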
2385static int lan743x_tx_open(struct lan743x_tx *tx)
2386{
2387	struct lan743x_adapter *adapter = NULL;
2388	u32 data = 0;
2389	int ret;
2390
2391	adapter = tx->adapter;
2392	ret = lan743x_tx_ring_init(tx);
2393	if (ret)
2394		return ret;
2395
2396	/* initialize fifo */
2397	lan743x_csr_write(adapter, FCT_TX_CTL,
2398			  FCT_TX_CTL_RESET_(tx->channel_number));
2399	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
2400				 FCT_TX_CTL_RESET_(tx->channel_number),
2401				 0, 1000, 20000, 100);
2402
2403	/* enable fifo */
2404	lan743x_csr_write(adapter, FCT_TX_CTL,
2405			  FCT_TX_CTL_EN_(tx->channel_number));
2406
2407	/* reset tx channel */
2408	lan743x_csr_write(adapter, DMAC_CMD,
2409			  DMAC_CMD_TX_SWR_(tx->channel_number));
2410	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2411				 DMAC_CMD_TX_SWR_(tx->channel_number),
2412				 0, 1000, 20000, 100);
2413
2414	/* Write TX_BASE_ADDR */
2415	lan743x_csr_write(adapter,
2416			  TX_BASE_ADDRH(tx->channel_number),
2417			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
2418	lan743x_csr_write(adapter,
2419			  TX_BASE_ADDRL(tx->channel_number),
2420			  DMA_ADDR_LOW32(tx->ring_dma_ptr));
2421
2422	/* Write TX_CFG_B */
2423	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
2424	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
2425	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
2426	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2427		data |= TX_CFG_B_TDMABL_512_;
2428	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
2429
2430	/* Write TX_CFG_A */
2431	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
2432	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2433		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
2434		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
2435		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
2436		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
2437	}
2438	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
2439
2440	/* Write TX_HEAD_WRITEBACK_ADDR */
2441	lan743x_csr_write(adapter,
2442			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
2443			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
2444	lan743x_csr_write(adapter,
2445			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
2446			  DMA_ADDR_LOW32(tx->head_dma_ptr));
2447
2448	/* set last head */
2449	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
2450
2451	/* write TX_TAIL */
2452	tx->last_tail = 0;
2453	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
2454			  (u32)(tx->last_tail));
2455	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2456							 INT_BIT_DMA_TX_
2457							 (tx->channel_number));
2458	netif_napi_add_tx_weight(adapter->netdev,
2459				 &tx->napi, lan743x_tx_napi_poll,
2460				 NAPI_POLL_WEIGHT);
2461	napi_enable(&tx->napi);
2462
2463	data = 0;
2464	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2465		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
2466	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2467		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
2468	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2469		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
2470	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2471		data |= TX_CFG_C_TX_INT_EN_R2C_;
2472	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
2473
2474	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
2475		lan743x_csr_write(adapter, INT_EN_SET,
2476				  INT_BIT_DMA_TX_(tx->channel_number));
2477	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2478			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
2479
2480	/*  start dmac channel */
2481	lan743x_csr_write(adapter, DMAC_CMD,
2482			  DMAC_CMD_START_T_(tx->channel_number));
2483	return 0;
2484}
2485
2486static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
2487{
2488	return ((++index) % rx->ring_size);
2489}
2490
2491static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
2492{
2493	/* update the tail once per 8 descriptors */
2494	if ((index & 7) == 7)
2495		lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
2496				  index);
2497}
2498
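/* Attach a freshly allocated skb to an RX descriptor. Any previously mapped
 * buffer is synced for the CPU (only its used area when the frame length is
 * known) and unmapped first; the descriptor is then handed back to the
 * hardware with the OWN bit set.
 */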
2499static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
2500					gfp_t gfp)
2501{
2502	struct net_device *netdev = rx->adapter->netdev;
2503	struct device *dev = &rx->adapter->pdev->dev;
2504	struct lan743x_rx_buffer_info *buffer_info;
2505	unsigned int buffer_length, used_length;
2506	struct lan743x_rx_descriptor *descriptor;
2507	struct sk_buff *skb;
2508	dma_addr_t dma_ptr;
2509
2510	buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
2511
2512	descriptor = &rx->ring_cpu_ptr[index];
2513	buffer_info = &rx->buffer_info[index];
2514	skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
2515	if (!skb)
2516		return -ENOMEM;
2517	dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
2518	if (dma_mapping_error(dev, dma_ptr)) {
2519		dev_kfree_skb_any(skb);
2520		return -ENOMEM;
2521	}
2522	if (buffer_info->dma_ptr) {
2523		/* sync used area of buffer only */
2524		if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
2525			/* frame length is valid only if LS bit is set.
2526			 * it's a safe upper bound for the used area in this
2527			 * buffer.
2528			 */
2529			used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
2530					  (le32_to_cpu(descriptor->data0)),
2531					  buffer_info->buffer_length);
2532		else
2533			used_length = buffer_info->buffer_length;
2534		dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
2535					used_length,
2536					DMA_FROM_DEVICE);
2537		dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
2538				       buffer_info->buffer_length,
2539				       DMA_FROM_DEVICE,
2540				       DMA_ATTR_SKIP_CPU_SYNC);
2541	}
2542
2543	buffer_info->skb = skb;
2544	buffer_info->dma_ptr = dma_ptr;
2545	buffer_info->buffer_length = buffer_length;
2546	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2547	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2548	descriptor->data3 = 0;
2549	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2550			    (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2551	lan743x_rx_update_tail(rx, index);
2552
2553	return 0;
2554}
2555
2556static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
2557{
2558	struct lan743x_rx_buffer_info *buffer_info;
2559	struct lan743x_rx_descriptor *descriptor;
2560
2561	descriptor = &rx->ring_cpu_ptr[index];
2562	buffer_info = &rx->buffer_info[index];
2563
2564	descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
2565	descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
2566	descriptor->data3 = 0;
2567	descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
2568			    ((buffer_info->buffer_length) &
2569			    RX_DESC_DATA0_BUF_LENGTH_MASK_)));
2570	lan743x_rx_update_tail(rx, index);
2571}
2572
2573static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
2574{
2575	struct lan743x_rx_buffer_info *buffer_info;
2576	struct lan743x_rx_descriptor *descriptor;
2577
2578	descriptor = &rx->ring_cpu_ptr[index];
2579	buffer_info = &rx->buffer_info[index];
2580
2581	memset(descriptor, 0, sizeof(*descriptor));
2582
2583	if (buffer_info->dma_ptr) {
2584		dma_unmap_single(&rx->adapter->pdev->dev,
2585				 buffer_info->dma_ptr,
2586				 buffer_info->buffer_length,
2587				 DMA_FROM_DEVICE);
2588		buffer_info->dma_ptr = 0;
2589	}
2590
2591	if (buffer_info->skb) {
2592		dev_kfree_skb(buffer_info->skb);
2593		buffer_info->skb = NULL;
2594	}
2595
2596	memset(buffer_info, 0, sizeof(*buffer_info));
2597}
2598
2599static struct sk_buff *
2600lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
2601{
2602	if (skb_linearize(skb)) {
2603		dev_kfree_skb_irq(skb);
2604		return NULL;
2605	}
2606	frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
2607	if (skb->len > frame_length) {
2608		skb->tail -= skb->len - frame_length;
2609		skb->len = frame_length;
2610	}
2611	return skb;
2612}
2613
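/* Consume one completed RX descriptor. Buffers belonging to a multi-buffer
 * frame are chained onto rx->skb_head via frag_list, and an optional
 * extension descriptor supplies the hardware timestamp. On the last buffer
 * the assembled skb is trimmed to the reported frame length and passed to
 * napi_gro_receive().
 */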
2614static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
2615{
2616	int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
2617	struct lan743x_rx_descriptor *descriptor, *desc_ext;
2618	struct net_device *netdev = rx->adapter->netdev;
2619	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2620	struct lan743x_rx_buffer_info *buffer_info;
2621	int frame_length, buffer_length;
2622	bool is_ice, is_tce, is_icsm;
2623	int extension_index = -1;
2624	bool is_last, is_first;
2625	struct sk_buff *skb;
2626
2627	if (current_head_index < 0 || current_head_index >= rx->ring_size)
2628		goto done;
2629
2630	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
2631		goto done;
2632
2633	if (rx->last_head == current_head_index)
2634		goto done;
2635
2636	descriptor = &rx->ring_cpu_ptr[rx->last_head];
2637	if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
2638		goto done;
2639	buffer_info = &rx->buffer_info[rx->last_head];
2640
2641	is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
2642	is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;
2643
2644	if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
2645		/* extension is expected to follow */
2646		int index = lan743x_rx_next_index(rx, rx->last_head);
2647
2648		if (index == current_head_index)
2649			/* extension not yet available */
2650			goto done;
2651		desc_ext = &rx->ring_cpu_ptr[index];
2652		if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
2653			/* extension not yet available */
2654			goto done;
2655		if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
2656			goto move_forward;
2657		extension_index = index;
2658	}
2659
2660	/* Only the last buffer in a multi-buffer frame contains the total frame
2661	 * length. The chip occasionally sends more buffers than strictly
2662	 * required to reach the total frame length.
2663	 * Handle this by adding all buffers to the skb in their entirety.
2664	 * Once the real frame length is known, trim the skb.
2665	 */
2666	frame_length =
2667		RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
2668	buffer_length = buffer_info->buffer_length;
2669	is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
2670	is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
2671	is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;
2672
2673	netdev_dbg(netdev, "%s%schunk: %d/%d",
2674		   is_first ? "first " : "      ",
2675		   is_last  ? "last  " : "      ",
2676		   frame_length, buffer_length);
2677
2678	/* save existing skb, allocate new skb and map to dma */
2679	skb = buffer_info->skb;
2680	if (lan743x_rx_init_ring_element(rx, rx->last_head,
2681					 GFP_ATOMIC | GFP_DMA)) {
2682		/* failed to allocate next skb.
2683		 * Memory is very low.
2684		 * Drop this packet and reuse buffer.
2685		 */
2686		lan743x_rx_reuse_ring_element(rx, rx->last_head);
2687		/* drop packet that was being assembled */
2688		dev_kfree_skb_irq(rx->skb_head);
2689		rx->skb_head = NULL;
2690		goto process_extension;
2691	}
2692
2693	/* add buffers to skb via skb->frag_list */
2694	if (is_first) {
2695		skb_reserve(skb, RX_HEAD_PADDING);
2696		skb_put(skb, buffer_length - RX_HEAD_PADDING);
2697		if (rx->skb_head)
2698			dev_kfree_skb_irq(rx->skb_head);
2699		rx->skb_head = skb;
2700	} else if (rx->skb_head) {
2701		skb_put(skb, buffer_length);
2702		if (skb_shinfo(rx->skb_head)->frag_list)
2703			rx->skb_tail->next = skb;
2704		else
2705			skb_shinfo(rx->skb_head)->frag_list = skb;
2706		rx->skb_tail = skb;
2707		rx->skb_head->len += skb->len;
2708		rx->skb_head->data_len += skb->len;
2709		rx->skb_head->truesize += skb->truesize;
2710	} else {
2711		/* packet to assemble has already been dropped because one or
2712		 * more of its buffers could not be allocated
2713		 */
2714		netdev_dbg(netdev, "drop buffer intended for dropped packet");
2715		dev_kfree_skb_irq(skb);
2716	}
2717
2718process_extension:
2719	if (extension_index >= 0) {
2720		u32 ts_sec;
2721		u32 ts_nsec;
2722
2723		ts_sec = le32_to_cpu(desc_ext->data1);
2724		ts_nsec = (le32_to_cpu(desc_ext->data2) &
2725			  RX_DESC_DATA2_TS_NS_MASK_);
2726		if (rx->skb_head)
2727			skb_hwtstamps(rx->skb_head)->hwtstamp =
2728				ktime_set(ts_sec, ts_nsec);
2729		lan743x_rx_reuse_ring_element(rx, extension_index);
2730		rx->last_head = extension_index;
2731		netdev_dbg(netdev, "process extension");
2732	}
2733
2734	if (is_last && rx->skb_head)
2735		rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);
2736
2737	if (is_last && rx->skb_head) {
2738		rx->skb_head->protocol = eth_type_trans(rx->skb_head,
2739							rx->adapter->netdev);
2740		if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
2741			if (!is_ice && !is_tce && !is_icsm)
2742				rx->skb_head->ip_summed = CHECKSUM_UNNECESSARY;
2743		}
2744		netdev_dbg(netdev, "sending %d byte frame to OS",
2745			   rx->skb_head->len);
2746		napi_gro_receive(&rx->napi, rx->skb_head);
2747		rx->skb_head = NULL;
2748	}
2749
2750move_forward:
2751	/* push tail and head forward */
2752	rx->last_tail = rx->last_head;
2753	rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
2754	result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
2755done:
2756	return result;
2757}
2758
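/* RX NAPI handler: process up to 'weight' buffers, staying scheduled while
 * the budget is exhausted or the last pass still found work; otherwise
 * complete NAPI and re-arm the RX interrupt, writing RX_TAIL when the
 * enable bits must ride on the tail update.
 */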
2759static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
2760{
2761	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
2762	struct lan743x_adapter *adapter = rx->adapter;
2763	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2764	u32 rx_tail_flags = 0;
2765	int count;
2766
2767	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
2768		/* clear int status bit before reading packet */
2769		lan743x_csr_write(adapter, DMAC_INT_STS,
2770				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2771	}
2772	for (count = 0; count < weight; count++) {
2773		result = lan743x_rx_process_buffer(rx);
2774		if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
2775			break;
2776	}
2777	rx->frame_count += count;
2778	if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
2779		return weight;
2780
2781	if (!napi_complete_done(napi, count))
2782		return count;
2783
2784	/* re-arm interrupts, must write to rx tail on some chip variants */
2785	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2786		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
2787	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
2788		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
2789	} else {
2790		lan743x_csr_write(adapter, INT_EN_SET,
2791				  INT_BIT_DMA_RX_(rx->channel_number));
2792	}
2793
2794	if (rx_tail_flags)
2795		lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2796				  rx_tail_flags | rx->last_tail);
2797
2798	return count;
2799}
2800
2801static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
2802{
2803	if (rx->buffer_info && rx->ring_cpu_ptr) {
2804		int index;
2805
2806		for (index = 0; index < rx->ring_size; index++)
2807			lan743x_rx_release_ring_element(rx, index);
2808	}
2809
2810	if (rx->head_cpu_ptr) {
2811		dma_free_coherent(&rx->adapter->pdev->dev,
2812				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
2813				  rx->head_dma_ptr);
2814		rx->head_cpu_ptr = NULL;
2815		rx->head_dma_ptr = 0;
2816	}
2817
2818	kfree(rx->buffer_info);
2819	rx->buffer_info = NULL;
2820
2821	if (rx->ring_cpu_ptr) {
2822		dma_free_coherent(&rx->adapter->pdev->dev,
2823				  rx->ring_allocation_size, rx->ring_cpu_ptr,
2824				  rx->ring_dma_ptr);
2825		rx->ring_allocation_size = 0;
2826		rx->ring_cpu_ptr = NULL;
2827		rx->ring_dma_ptr = 0;
2828	}
2829
2830	rx->ring_size = 0;
2831	rx->last_head = 0;
2832}
2833
2834static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2835{
2836	size_t ring_allocation_size = 0;
2837	dma_addr_t dma_ptr = 0;
2838	void *cpu_ptr = NULL;
2839	int ret = -ENOMEM;
2840	int index = 0;
2841
2842	rx->ring_size = LAN743X_RX_RING_SIZE;
2843	if (rx->ring_size <= 1) {
2844		ret = -EINVAL;
2845		goto cleanup;
2846	}
2847	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2848		ret = -EINVAL;
2849		goto cleanup;
2850	}
2851	if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
2852				      DMA_BIT_MASK(64))) {
2853		dev_warn(&rx->adapter->pdev->dev,
2854			 "lan743x_: No suitable DMA available\n");
2855		ret = -ENOMEM;
2856		goto cleanup;
2857	}
2858	ring_allocation_size = ALIGN(rx->ring_size *
2859				     sizeof(struct lan743x_rx_descriptor),
2860				     PAGE_SIZE);
2861	dma_ptr = 0;
2862	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2863				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2864	if (!cpu_ptr) {
2865		ret = -ENOMEM;
2866		goto cleanup;
2867	}
2868	rx->ring_allocation_size = ring_allocation_size;
2869	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2870	rx->ring_dma_ptr = dma_ptr;
2871
2872	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
2873			  GFP_KERNEL);
2874	if (!cpu_ptr) {
2875		ret = -ENOMEM;
2876		goto cleanup;
2877	}
2878	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2879	dma_ptr = 0;
2880	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2881				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
2882				     GFP_KERNEL);
2883	if (!cpu_ptr) {
2884		ret = -ENOMEM;
2885		goto cleanup;
2886	}
2887
2888	rx->head_cpu_ptr = cpu_ptr;
2889	rx->head_dma_ptr = dma_ptr;
2890	if (rx->head_dma_ptr & 0x3) {
2891		ret = -ENOMEM;
2892		goto cleanup;
2893	}
2894
2895	rx->last_head = 0;
2896	for (index = 0; index < rx->ring_size; index++) {
2897		ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
2898		if (ret)
2899			goto cleanup;
2900	}
2901	return 0;
2902
2903cleanup:
2904	netif_warn(rx->adapter, ifup, rx->adapter->netdev,
2905		   "Error allocating memory for LAN743x\n");
2906
2907	lan743x_rx_ring_cleanup(rx);
2908	return ret;
2909}
2910
2911static void lan743x_rx_close(struct lan743x_rx *rx)
2912{
2913	struct lan743x_adapter *adapter = rx->adapter;
2914
2915	lan743x_csr_write(adapter, FCT_RX_CTL,
2916			  FCT_RX_CTL_DIS_(rx->channel_number));
2917	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2918				 FCT_RX_CTL_EN_(rx->channel_number),
2919				 0, 1000, 20000, 100);
2920
2921	lan743x_csr_write(adapter, DMAC_CMD,
2922			  DMAC_CMD_STOP_R_(rx->channel_number));
2923	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
2924
2925	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
2926			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2927	lan743x_csr_write(adapter, INT_EN_CLR,
2928			  INT_BIT_DMA_RX_(rx->channel_number));
2929	napi_disable(&rx->napi);
2930
2931	netif_napi_del(&rx->napi);
2932
2933	lan743x_rx_ring_cleanup(rx);
2934}
2935
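/* Bring up one RX channel: allocate and fill the ring, soft-reset the DMA
 * channel, program the ring base, write-back and threshold registers,
 * enable NAPI and interrupts, start the DMAC channel and finally reset and
 * enable the RX FIFO with its flow-control thresholds.
 */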
2936static int lan743x_rx_open(struct lan743x_rx *rx)
2937{
2938	struct lan743x_adapter *adapter = rx->adapter;
2939	u32 data = 0;
2940	int ret;
2941
2942	rx->frame_count = 0;
2943	ret = lan743x_rx_ring_init(rx);
2944	if (ret)
2945		goto return_error;
2946
2947	netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);
2948
2949	lan743x_csr_write(adapter, DMAC_CMD,
2950			  DMAC_CMD_RX_SWR_(rx->channel_number));
2951	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2952				 DMAC_CMD_RX_SWR_(rx->channel_number),
2953				 0, 1000, 20000, 100);
2954
2955	/* set ring base address */
2956	lan743x_csr_write(adapter,
2957			  RX_BASE_ADDRH(rx->channel_number),
2958			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
2959	lan743x_csr_write(adapter,
2960			  RX_BASE_ADDRL(rx->channel_number),
2961			  DMA_ADDR_LOW32(rx->ring_dma_ptr));
2962
2963	/* set rx write back address */
2964	lan743x_csr_write(adapter,
2965			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
2966			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
2967	lan743x_csr_write(adapter,
2968			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
2969			  DMA_ADDR_LOW32(rx->head_dma_ptr));
2970	data = RX_CFG_A_RX_HP_WB_EN_;
2971	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2972		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
2973			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
2974			RX_CFG_A_RX_PF_THRES_SET_(16) |
2975			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
2976	}
2977
2978	/* set RX_CFG_A */
2979	lan743x_csr_write(adapter,
2980			  RX_CFG_A(rx->channel_number), data);
2981
2982	/* set RX_CFG_B */
2983	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
2984	data &= ~RX_CFG_B_RX_PAD_MASK_;
2985	if (!RX_HEAD_PADDING)
2986		data |= RX_CFG_B_RX_PAD_0_;
2987	else
2988		data |= RX_CFG_B_RX_PAD_2_;
2989	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
2990	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
2991	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2992		data |= RX_CFG_B_RDMABL_512_;
2993
2994	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
2995	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2996							 INT_BIT_DMA_RX_
2997							 (rx->channel_number));
2998
2999	/* set RX_CFG_C */
3000	data = 0;
3001	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
3002		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
3003	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
3004		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
3005	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
3006		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
3007	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
3008		data |= RX_CFG_C_RX_INT_EN_R2C_;
3009	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
3010
3011	rx->last_tail = ((u32)(rx->ring_size - 1));
3012	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
3013			  rx->last_tail);
3014	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
3015	if (rx->last_head) {
3016		ret = -EIO;
3017		goto napi_delete;
3018	}
3019
3020	napi_enable(&rx->napi);
3021
3022	lan743x_csr_write(adapter, INT_EN_SET,
3023			  INT_BIT_DMA_RX_(rx->channel_number));
3024	lan743x_csr_write(adapter, DMAC_INT_STS,
3025			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
3026	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
3027			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
3028	lan743x_csr_write(adapter, DMAC_CMD,
3029			  DMAC_CMD_START_R_(rx->channel_number));
3030
3031	/* initialize fifo */
3032	lan743x_csr_write(adapter, FCT_RX_CTL,
3033			  FCT_RX_CTL_RESET_(rx->channel_number));
3034	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
3035				 FCT_RX_CTL_RESET_(rx->channel_number),
3036				 0, 1000, 20000, 100);
3037	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
3038			  FCT_FLOW_CTL_REQ_EN_ |
3039			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
3040			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
3041
3042	/* enable fifo */
3043	lan743x_csr_write(adapter, FCT_RX_CTL,
3044			  FCT_RX_CTL_EN_(rx->channel_number));
3045	return 0;
3046
3047napi_delete:
3048	netif_napi_del(&rx->napi);
3049	lan743x_rx_ring_cleanup(rx);
3050
3051return_error:
3052	return ret;
3053}
3054
3055static int lan743x_netdev_close(struct net_device *netdev)
3056{
3057	struct lan743x_adapter *adapter = netdev_priv(netdev);
3058	int index;
3059
3060	for (index = 0; index < adapter->used_tx_channels; index++)
3061		lan743x_tx_close(&adapter->tx[index]);
3062
3063	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
3064		lan743x_rx_close(&adapter->rx[index]);
3065
3066	lan743x_ptp_close(adapter);
3067
3068	lan743x_phy_close(adapter);
3069
3070	lan743x_mac_close(adapter);
3071
3072	lan743x_intr_close(adapter);
3073
3074	return 0;
3075}
3076
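/* ndo_open: bring the interface up in order (interrupts, MAC, PHY, PTP,
 * RFE, then every RX and TX channel), unwinding in reverse order if any
 * step fails.
 */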
3077static int lan743x_netdev_open(struct net_device *netdev)
3078{
3079	struct lan743x_adapter *adapter = netdev_priv(netdev);
3080	int index;
3081	int ret;
3082
3083	ret = lan743x_intr_open(adapter);
3084	if (ret)
3085		goto return_error;
3086
3087	ret = lan743x_mac_open(adapter);
3088	if (ret)
3089		goto close_intr;
3090
3091	ret = lan743x_phy_open(adapter);
3092	if (ret)
3093		goto close_mac;
3094
3095	ret = lan743x_ptp_open(adapter);
3096	if (ret)
3097		goto close_phy;
3098
3099	lan743x_rfe_open(adapter);
3100
3101	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3102		ret = lan743x_rx_open(&adapter->rx[index]);
3103		if (ret)
3104			goto close_rx;
3105	}
3106
3107	for (index = 0; index < adapter->used_tx_channels; index++) {
3108		ret = lan743x_tx_open(&adapter->tx[index]);
3109		if (ret)
3110			goto close_tx;
3111	}
3112	return 0;
3113
3114close_tx:
3115	for (index = 0; index < adapter->used_tx_channels; index++) {
3116		if (adapter->tx[index].ring_cpu_ptr)
3117			lan743x_tx_close(&adapter->tx[index]);
3118	}
3119
3120close_rx:
3121	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3122		if (adapter->rx[index].ring_cpu_ptr)
3123			lan743x_rx_close(&adapter->rx[index]);
3124	}
3125	lan743x_ptp_close(adapter);
3126
3127close_phy:
3128	lan743x_phy_close(adapter);
3129
3130close_mac:
3131	lan743x_mac_close(adapter);
3132
3133close_intr:
3134	lan743x_intr_close(adapter);
3135
3136return_error:
3137	netif_warn(adapter, ifup, adapter->netdev,
3138		   "Error opening LAN743x\n");
3139	return ret;
3140}
3141
3142static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
3143					     struct net_device *netdev)
3144{
3145	struct lan743x_adapter *adapter = netdev_priv(netdev);
3146	u8 ch = 0;
3147
3148	if (adapter->is_pci11x1x)
3149		ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
3150
3151	return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
3152}
3153
3154static int lan743x_netdev_ioctl(struct net_device *netdev,
3155				struct ifreq *ifr, int cmd)
3156{
3157	if (!netif_running(netdev))
3158		return -EINVAL;
3159	if (cmd == SIOCSHWTSTAMP)
3160		return lan743x_ptp_ioctl(netdev, ifr, cmd);
3161	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
3162}
3163
3164static void lan743x_netdev_set_multicast(struct net_device *netdev)
3165{
3166	struct lan743x_adapter *adapter = netdev_priv(netdev);
3167
3168	lan743x_rfe_set_multicast(adapter);
3169}
3170
3171static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
3172{
3173	struct lan743x_adapter *adapter = netdev_priv(netdev);
3174	int ret = 0;
3175
3176	ret = lan743x_mac_set_mtu(adapter, new_mtu);
3177	if (!ret)
3178		netdev->mtu = new_mtu;
3179	return ret;
3180}
3181
3182static void lan743x_netdev_get_stats64(struct net_device *netdev,
3183				       struct rtnl_link_stats64 *stats)
3184{
3185	struct lan743x_adapter *adapter = netdev_priv(netdev);
3186
3187	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
3188	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
3189	stats->rx_bytes = lan743x_csr_read(adapter,
3190					   STAT_RX_UNICAST_BYTE_COUNT) +
3191			  lan743x_csr_read(adapter,
3192					   STAT_RX_BROADCAST_BYTE_COUNT) +
3193			  lan743x_csr_read(adapter,
3194					   STAT_RX_MULTICAST_BYTE_COUNT);
3195	stats->tx_bytes = lan743x_csr_read(adapter,
3196					   STAT_TX_UNICAST_BYTE_COUNT) +
3197			  lan743x_csr_read(adapter,
3198					   STAT_TX_BROADCAST_BYTE_COUNT) +
3199			  lan743x_csr_read(adapter,
3200					   STAT_TX_MULTICAST_BYTE_COUNT);
3201	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
3202			   lan743x_csr_read(adapter,
3203					    STAT_RX_ALIGNMENT_ERRORS) +
3204			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
3205			   lan743x_csr_read(adapter,
3206					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
3207			   lan743x_csr_read(adapter,
3208					    STAT_RX_OVERSIZE_FRAME_ERRORS);
3209	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
3210			   lan743x_csr_read(adapter,
3211					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
3212			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
3213	stats->rx_dropped = lan743x_csr_read(adapter,
3214					     STAT_RX_DROPPED_FRAMES);
3215	stats->tx_dropped = lan743x_csr_read(adapter,
3216					     STAT_TX_EXCESSIVE_COLLISION);
3217	stats->multicast = lan743x_csr_read(adapter,
3218					    STAT_RX_MULTICAST_FRAMES) +
3219			   lan743x_csr_read(adapter,
3220					    STAT_TX_MULTICAST_FRAMES);
3221	stats->collisions = lan743x_csr_read(adapter,
3222					     STAT_TX_SINGLE_COLLISIONS) +
3223			    lan743x_csr_read(adapter,
3224					     STAT_TX_MULTIPLE_COLLISIONS) +
3225			    lan743x_csr_read(adapter,
3226					     STAT_TX_LATE_COLLISIONS);
3227}
3228
3229static int lan743x_netdev_set_mac_address(struct net_device *netdev,
3230					  void *addr)
3231{
3232	struct lan743x_adapter *adapter = netdev_priv(netdev);
3233	struct sockaddr *sock_addr = addr;
3234	int ret;
3235
3236	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
3237	if (ret)
3238		return ret;
3239	eth_hw_addr_set(netdev, sock_addr->sa_data);
3240	lan743x_mac_set_address(adapter, sock_addr->sa_data);
3241	lan743x_rfe_update_mac_address(adapter);
3242	return 0;
3243}
3244
3245static const struct net_device_ops lan743x_netdev_ops = {
3246	.ndo_open		= lan743x_netdev_open,
3247	.ndo_stop		= lan743x_netdev_close,
3248	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
3249	.ndo_eth_ioctl		= lan743x_netdev_ioctl,
3250	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
3251	.ndo_change_mtu		= lan743x_netdev_change_mtu,
3252	.ndo_get_stats64	= lan743x_netdev_get_stats64,
3253	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
3254};
3255
3256static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
3257{
3258	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
3259}
3260
3261static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
3262{
3263	mdiobus_unregister(adapter->mdiobus);
3264}
3265
3266static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
3267{
3268	unregister_netdev(adapter->netdev);
3269
3270	lan743x_mdiobus_cleanup(adapter);
3271	lan743x_hardware_cleanup(adapter);
3272	lan743x_pci_cleanup(adapter);
3273}
3274
3275static int lan743x_hardware_init(struct lan743x_adapter *adapter,
3276				 struct pci_dev *pdev)
3277{
3278	struct lan743x_tx *tx;
3279	int index;
3280	int ret;
3281
3282	adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
3283	if (adapter->is_pci11x1x) {
3284		adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
3285		adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
3286		adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
3287		pci11x1x_strap_get_status(adapter);
3288		spin_lock_init(&adapter->eth_syslock_spinlock);
3289		mutex_init(&adapter->sgmii_rw_lock);
3290	} else {
3291		adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
3292		adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
3293		adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
3294	}
3295
3296	adapter->intr.irq = adapter->pdev->irq;
3297	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
3298
3299	ret = lan743x_gpio_init(adapter);
3300	if (ret)
3301		return ret;
3302
3303	ret = lan743x_mac_init(adapter);
3304	if (ret)
3305		return ret;
3306
3307	ret = lan743x_phy_init(adapter);
3308	if (ret)
3309		return ret;
3310
3311	ret = lan743x_ptp_init(adapter);
3312	if (ret)
3313		return ret;
3314
3315	lan743x_rfe_update_mac_address(adapter);
3316
3317	ret = lan743x_dmac_init(adapter);
3318	if (ret)
3319		return ret;
3320
3321	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
3322		adapter->rx[index].adapter = adapter;
3323		adapter->rx[index].channel_number = index;
3324	}
3325
3326	for (index = 0; index < adapter->used_tx_channels; index++) {
3327		tx = &adapter->tx[index];
3328		tx->adapter = adapter;
3329		tx->channel_number = index;
3330		spin_lock_init(&tx->ring_lock);
3331	}
3332
3333	return 0;
3334}
3335
3336static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
3337{
3338	u32 sgmii_ctl;
3339	int ret;
3340
3341	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
3342	if (!(adapter->mdiobus)) {
3343		ret = -ENOMEM;
3344		goto return_error;
3345	}
3346
3347	adapter->mdiobus->priv = (void *)adapter;
3348	if (adapter->is_pci11x1x) {
3349		if (adapter->is_sgmii_en) {
3350			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
3351			sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
3352			sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
3353			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
3354			netif_dbg(adapter, drv, adapter->netdev,
3355				  "SGMII operation\n");
3356			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3357			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3358			adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
3359			adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
3360			adapter->mdiobus->name = "lan743x-mdiobus-c45";
3361			netif_dbg(adapter, drv, adapter->netdev,
3362				  "lan743x-mdiobus-c45\n");
3363		} else {
3364			sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
3365			sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
3366			sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
3367			lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
3368			netif_dbg(adapter, drv, adapter->netdev,
3369				  "RGMII operation\n");
3370			/* Only C22 access is supported on the RGMII interface */
3371			adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3372			adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3373			adapter->mdiobus->name = "lan743x-mdiobus";
3374			netif_dbg(adapter, drv, adapter->netdev,
3375				  "lan743x-mdiobus\n");
3376		}
3377	} else {
3378		adapter->mdiobus->read = lan743x_mdiobus_read_c22;
3379		adapter->mdiobus->write = lan743x_mdiobus_write_c22;
3380		adapter->mdiobus->name = "lan743x-mdiobus";
3381		netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
3382	}
3383
3384	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
3385		 "pci-%s", pci_name(adapter->pdev));
3386
3387	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
3388		/* LAN7430 uses internal phy at address 1 */
3389		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
3390
3391	/* register mdiobus */
3392	ret = mdiobus_register(adapter->mdiobus);
3393	if (ret < 0)
3394		goto return_error;
3395	return 0;
3396
3397return_error:
3398	return ret;
3399}
3400
3401/* lan743x_pcidev_probe - Device Initialization Routine
3402 * @pdev: PCI device information struct
3403 * @id: entry in lan743x_pci_tbl
3404 *
3405 * Returns 0 on success, negative on failure
3406 *
3407 * Initializes an adapter identified by a pci_dev structure:
3408 * performs the OS initialization, configures the adapter private
3409 * structure and resets the hardware.
3410 **/
3411static int lan743x_pcidev_probe(struct pci_dev *pdev,
3412				const struct pci_device_id *id)
3413{
3414	struct lan743x_adapter *adapter = NULL;
3415	struct net_device *netdev = NULL;
3416	int ret = -ENODEV;
3417
3418	if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
3419	    id->device == PCI_DEVICE_ID_SMSC_A041) {
3420		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
3421						 sizeof(struct lan743x_adapter),
3422						 PCI11X1X_USED_TX_CHANNELS,
3423						 LAN743X_USED_RX_CHANNELS);
3424	} else {
3425		netdev = devm_alloc_etherdev_mqs(&pdev->dev,
3426						 sizeof(struct lan743x_adapter),
3427						 LAN743X_USED_TX_CHANNELS,
3428						 LAN743X_USED_RX_CHANNELS);
3429	}
3430
3431	if (!netdev)
3432		goto return_error;
3433
3434	SET_NETDEV_DEV(netdev, &pdev->dev);
3435	pci_set_drvdata(pdev, netdev);
3436	adapter = netdev_priv(netdev);
3437	adapter->netdev = netdev;
3438	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
3439			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
3440			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
3441	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
3442
3443	of_get_mac_address(pdev->dev.of_node, adapter->mac_address);
3444
3445	ret = lan743x_pci_init(adapter, pdev);
3446	if (ret)
3447		goto return_error;
3448
3449	ret = lan743x_csr_init(adapter);
3450	if (ret)
3451		goto cleanup_pci;
3452
3453	ret = lan743x_hardware_init(adapter, pdev);
3454	if (ret)
3455		goto cleanup_pci;
3456
3457	ret = lan743x_mdiobus_init(adapter);
3458	if (ret)
3459		goto cleanup_hardware;
3460
3461	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
3462	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
3463	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
3464				    NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3465	adapter->netdev->hw_features = adapter->netdev->features;
3466
3467	/* carrier off reporting is important to ethtool even BEFORE open */
3468	netif_carrier_off(netdev);
3469
3470	ret = register_netdev(adapter->netdev);
3471	if (ret < 0)
3472		goto cleanup_mdiobus;
3473	return 0;
3474
3475cleanup_mdiobus:
3476	lan743x_mdiobus_cleanup(adapter);
3477
3478cleanup_hardware:
3479	lan743x_hardware_cleanup(adapter);
3480
3481cleanup_pci:
3482	lan743x_pci_cleanup(adapter);
3483
3484return_error:
3485	pr_warn("Initialization failed\n");
3486	return ret;
3487}
3488
3489/**
3490 * lan743x_pcidev_remove - Device Removal Routine
3491 * @pdev: PCI device information struct
3492 *
3493 * This is called by the PCI subsystem to alert the driver
3494 * that it should release a PCI device. This could be caused by a
3495 * Hot-Plug event, or because the driver is going to be removed from
3496 * memory.
3497 **/
3498static void lan743x_pcidev_remove(struct pci_dev *pdev)
3499{
3500	struct net_device *netdev = pci_get_drvdata(pdev);
3501	struct lan743x_adapter *adapter = netdev_priv(netdev);
3502
3503	lan743x_full_cleanup(adapter);
3504}
3505
3506static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
3507{
3508	struct net_device *netdev = pci_get_drvdata(pdev);
3509	struct lan743x_adapter *adapter = netdev_priv(netdev);
3510
3511	rtnl_lock();
3512	netif_device_detach(netdev);
3513
3514	/* Close the netdev only when it is in the running state.
3515	 * For instance, this is true when the system goes to sleep via
3516	 * pm-suspend, but false when it goes to sleep via the GUI suspend menu.
3517	 */
3518	if (netif_running(netdev))
3519		lan743x_netdev_close(netdev);
3520	rtnl_unlock();
3521
3522#ifdef CONFIG_PM
3523	pci_save_state(pdev);
3524#endif
3525
3526	/* clean up lan743x portion */
3527	lan743x_hardware_cleanup(adapter);
3528}
3529
3530#ifdef CONFIG_PM_SLEEP
3531static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
3532{
3533	return bitrev16(crc16(0xFFFF, buf, len));
3534}
3535
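/* Program Wake-on-LAN: clear the existing wakeup-frame filters, then
 * translate adapter->wolopts (PHY, magic packet, unicast, broadcast,
 * multicast and ARP wake) into WUCSR, MAC_RX and PMT_CTL settings.
 * Multicast and ARP wake use CRC16-based wakeup-frame filters matched
 * against the relevant header bytes.
 */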
3536static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
3537{
3538	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3539	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3540	const u8 arp_type[2] = { 0x08, 0x06 };
3541	int mask_index;
3542	u32 sopass;
3543	u32 pmtctl;
3544	u32 wucsr;
3545	u32 macrx;
3546	u16 crc;
3547
3548	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
3549		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
3550
3551	/* clear wake settings */
3552	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
3553	pmtctl |= PMT_CTL_WUPS_MASK_;
3554	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
3555		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
3556		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
3557
3558	macrx = lan743x_csr_read(adapter, MAC_RX);
3559
3560	wucsr = 0;
3561	mask_index = 0;
3562
3563	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
3564
3565	if (adapter->wolopts & WAKE_PHY) {
3566		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
3567		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
3568	}
3569	if (adapter->wolopts & WAKE_MAGIC) {
3570		wucsr |= MAC_WUCSR_MPEN_;
3571		macrx |= MAC_RX_RXEN_;
3572		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3573	}
3574	if (adapter->wolopts & WAKE_UCAST) {
3575		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
3576		macrx |= MAC_RX_RXEN_;
3577		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3578		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3579	}
3580	if (adapter->wolopts & WAKE_BCAST) {
3581		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
3582		macrx |= MAC_RX_RXEN_;
3583		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3584		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3585	}
3586	if (adapter->wolopts & WAKE_MCAST) {
3587		/* IPv4 multicast */
3588		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
3589		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3590				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3591				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3592				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3593		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
3594		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3595		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3596		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3597		mask_index++;
3598
3599		/* IPv6 multicast */
3600		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
3601		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3602				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
3603				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3604				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3605		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
3606		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3607		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3608		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3609		mask_index++;
3610
3611		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3612		macrx |= MAC_RX_RXEN_;
3613		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3614		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3615	}
3616	if (adapter->wolopts & WAKE_ARP) {
3617		/* set MAC_WUF_CFG & WUF_MASK
3618		 * for packettype (offset 12,13) = ARP (0x0806)
3619		 */
3620		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
3621		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3622				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
3623				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3624				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3625		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
3626		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3627		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3628		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3629		mask_index++;
3630
3631		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3632		macrx |= MAC_RX_RXEN_;
3633		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3634		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3635	}
3636
3637	if (adapter->wolopts & WAKE_MAGICSECURE) {
3638		sopass = *(u32 *)adapter->sopass;
3639		lan743x_csr_write(adapter, MAC_MP_SO_LO, sopass);
3640		sopass = *(u16 *)&adapter->sopass[4];
3641		lan743x_csr_write(adapter, MAC_MP_SO_HI, sopass);
3642		wucsr |= MAC_MP_SO_EN_;
3643	}
3644
3645	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
3646	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
3647	lan743x_csr_write(adapter, MAC_RX, macrx);
3648}
3649
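
The wake-up filters programmed above match on a CRC-16 of the masked pattern bytes, with lan743x_pm_wakeframe_crc16() producing the bit-reversed CRC that is loaded into the MAC_WUF_CFG CRC16 field. As a rough stand-alone illustration (not driver code), the same value can be reproduced in user space, assuming the kernel's crc16() is the reflected CRC-16/ANSI (polynomial 0xA001) implementation from lib/crc16.c:

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-16/ANSI (poly 0xA001); assumed to match lib/crc16.c. */
static uint16_t crc16_ansi(uint16_t crc, const uint8_t *buf, int len)
{
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

/* Reverse the 16 bits, mirroring the kernel's bitrev16(). */
static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;
	int i;

	for (i = 0; i < 16; i++)
		r |= ((x >> i) & 1) << (15 - i);
	return r;
}

int main(void)
{
	const uint8_t ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	uint16_t crc = bitrev16(crc16_ansi(0xFFFF, ipv4_multicast, 3));

	/* Under the assumption above, this is the CRC16 field value the
	 * driver programs for the IPv4 multicast filter when WAKE_MCAST
	 * is requested.
	 */
	printf("wake filter crc16 = 0x%04X\n", (unsigned int)crc);
	return 0;
}
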
3650static int lan743x_pm_suspend(struct device *dev)
3651{
3652	struct pci_dev *pdev = to_pci_dev(dev);
3653	struct net_device *netdev = pci_get_drvdata(pdev);
3654	struct lan743x_adapter *adapter = netdev_priv(netdev);
3655	u32 data;
3656
3657	lan743x_pcidev_shutdown(pdev);
3658
3659	/* clear all wakes */
3660	lan743x_csr_write(adapter, MAC_WUCSR, 0);
3661	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
3662	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
3663
3664	if (adapter->wolopts)
3665		lan743x_pm_set_wol(adapter);
3666
3667	if (adapter->is_pci11x1x) {
3668		/* Save HW_CFG to config again in PM resume */
3669		data = lan743x_csr_read(adapter, HW_CFG);
3670		adapter->hw_cfg = data;
3671		data |= (HW_CFG_RST_PROTECT_PCIE_ |
3672			 HW_CFG_D3_RESET_DIS_ |
3673			 HW_CFG_D3_VAUX_OVR_ |
3674			 HW_CFG_HOT_RESET_DIS_ |
3675			 HW_CFG_RST_PROTECT_);
3676		lan743x_csr_write(adapter, HW_CFG, data);
3677	}
3678
3679	/* Host sets PME_En, put D3hot */
3680	return pci_prepare_to_sleep(pdev);
3681}
3682
3683static int lan743x_pm_resume(struct device *dev)
3684{
3685	struct pci_dev *pdev = to_pci_dev(dev);
3686	struct net_device *netdev = pci_get_drvdata(pdev);
3687	struct lan743x_adapter *adapter = netdev_priv(netdev);
3688	int ret;
3689
3690	pci_set_power_state(pdev, PCI_D0);
3691	pci_restore_state(pdev);
3692	pci_save_state(pdev);
3693
3694	/* Restore HW_CFG that was saved during pm suspend */
3695	if (adapter->is_pci11x1x)
3696		lan743x_csr_write(adapter, HW_CFG, adapter->hw_cfg);
3697
3698	ret = lan743x_hardware_init(adapter, pdev);
3699	if (ret) {
3700		netif_err(adapter, probe, adapter->netdev,
3701			  "lan743x_hardware_init returned %d\n", ret);
3702		lan743x_pci_cleanup(adapter);
3703		return ret;
3704	}
3705
3706	/* Reopen the netdev if it was in the running state before suspend.
3707	 * For instance, this is true when the system wakes up after pm-suspend,
3708	 * but false when it wakes up after the suspend GUI menu.
3709	 */
3710	if (netif_running(netdev))
3711		lan743x_netdev_open(netdev);
3712
3713	netif_device_attach(netdev);
3714	ret = lan743x_csr_read(adapter, MAC_WK_SRC);
3715	netif_info(adapter, drv, adapter->netdev,
3716		   "Wakeup source : 0x%08X\n", ret);
3717
3718	return 0;
3719}
3720
3721static const struct dev_pm_ops lan743x_pm_ops = {
3722	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3723};
3724#endif /* CONFIG_PM_SLEEP */
3725
3726static const struct pci_device_id lan743x_pcidev_tbl[] = {
3727	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3728	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3729	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
3730	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
3731	{ 0, }
3732};
3733
3734MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
3735
3736static struct pci_driver lan743x_pcidev_driver = {
3737	.name     = DRIVER_NAME,
3738	.id_table = lan743x_pcidev_tbl,
3739	.probe    = lan743x_pcidev_probe,
3740	.remove   = lan743x_pcidev_remove,
3741#ifdef CONFIG_PM_SLEEP
3742	.driver.pm = &lan743x_pm_ops,
3743#endif
3744	.shutdown = lan743x_pcidev_shutdown,
3745};
3746
3747module_pci_driver(lan743x_pcidev_driver);
3748
3749MODULE_AUTHOR(DRIVER_AUTHOR);
3750MODULE_DESCRIPTION(DRIVER_DESC);
3751MODULE_LICENSE("GPL");
v5.9
   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/* Copyright (C) 2018 Microchip Technology Inc. */
   3
   4#include <linux/module.h>
   5#include <linux/pci.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/crc32.h>
   9#include <linux/microchipphy.h>
  10#include <linux/net_tstamp.h>
  11#include <linux/of_mdio.h>
  12#include <linux/of_net.h>
  13#include <linux/phy.h>
  14#include <linux/phy_fixed.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/iopoll.h>
  17#include <linux/crc16.h>
  18#include "lan743x_main.h"
  19#include "lan743x_ethtool.h"
  20
  21static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
  22{
  23	pci_release_selected_regions(adapter->pdev,
  24				     pci_select_bars(adapter->pdev,
  25						     IORESOURCE_MEM));
  26	pci_disable_device(adapter->pdev);
  27}
  28
  29static int lan743x_pci_init(struct lan743x_adapter *adapter,
  30			    struct pci_dev *pdev)
  31{
  32	unsigned long bars = 0;
  33	int ret;
  34
  35	adapter->pdev = pdev;
  36	ret = pci_enable_device_mem(pdev);
  37	if (ret)
  38		goto return_error;
  39
  40	netif_info(adapter, probe, adapter->netdev,
  41		   "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
  42		   pdev->vendor, pdev->device);
  43	bars = pci_select_bars(pdev, IORESOURCE_MEM);
  44	if (!test_bit(0, &bars))
  45		goto disable_device;
  46
  47	ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
  48	if (ret)
  49		goto disable_device;
  50
  51	pci_set_master(pdev);
  52	return 0;
  53
  54disable_device:
  55	pci_disable_device(adapter->pdev);
  56
  57return_error:
  58	return ret;
  59}
  60
  61u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
  62{
  63	return ioread32(&adapter->csr.csr_address[offset]);
  64}
  65
  66void lan743x_csr_write(struct lan743x_adapter *adapter, int offset,
  67		       u32 data)
  68{
  69	iowrite32(data, &adapter->csr.csr_address[offset]);
  70}
  71
  72#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
  73
  74static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
  75{
  76	u32 data;
  77
  78	data = lan743x_csr_read(adapter, HW_CFG);
  79	data |= HW_CFG_LRST_;
  80	lan743x_csr_write(adapter, HW_CFG, data);
  81
  82	return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
  83				  !(data & HW_CFG_LRST_), 100000, 10000000);
  84}
  85
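
The LAN743X_CSR_READ_OP wrapper above exists because readx_poll_timeout() expects an accessor it can call as val = op(addr); wrapping lan743x_csr_read() this way keeps the adapter pointer implicit while the register offset is passed through. Functionally, the light-reset poll is roughly equivalent to the hand-rolled loop below (a simplified sketch only; lan743x_wait_lrst_clear is a hypothetical name, and the real iopoll macro additionally performs a final re-read after the deadline):

/* Hypothetical, simplified equivalent of the readx_poll_timeout() call in
 * lan743x_csr_light_reset(): poll HW_CFG until the LRST bit self-clears,
 * sleeping ~100 ms between reads for up to ~10 s.
 */
static int lan743x_wait_lrst_clear(struct lan743x_adapter *adapter)
{
	int retries = 100;
	u32 data;

	while (retries--) {
		data = lan743x_csr_read(adapter, HW_CFG);
		if (!(data & HW_CFG_LRST_))
			return 0;
		msleep(100);
	}
	return -ETIMEDOUT;
}
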
  86static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
  87				    int offset, u32 bit_mask,
  88				    int target_value, int usleep_min,
  89				    int usleep_max, int count)
  90{
  91	u32 data;
  92
  93	return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
  94				  target_value == ((data & bit_mask) ? 1 : 0),
  95				  usleep_max, usleep_min * count);
  96}
  97
  98static int lan743x_csr_init(struct lan743x_adapter *adapter)
  99{
 100	struct lan743x_csr *csr = &adapter->csr;
 101	resource_size_t bar_start, bar_length;
 102	int result;
 103
 104	bar_start = pci_resource_start(adapter->pdev, 0);
 105	bar_length = pci_resource_len(adapter->pdev, 0);
 106	csr->csr_address = devm_ioremap(&adapter->pdev->dev,
 107					bar_start, bar_length);
 108	if (!csr->csr_address) {
 109		result = -ENOMEM;
 110		goto clean_up;
 111	}
 112
 113	csr->id_rev = lan743x_csr_read(adapter, ID_REV);
 114	csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
 115	netif_info(adapter, probe, adapter->netdev,
 116		   "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
 117		   csr->id_rev,	FPGA_REV_GET_MAJOR_(csr->fpga_rev),
 118		   FPGA_REV_GET_MINOR_(csr->fpga_rev));
 119	if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
 120		result = -ENODEV;
 121		goto clean_up;
 122	}
 123
 124	csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 125	switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
 126	case ID_REV_CHIP_REV_A0_:
 127		csr->flags |= LAN743X_CSR_FLAG_IS_A0;
 128		csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
 129		break;
 130	case ID_REV_CHIP_REV_B0_:
 131		csr->flags |= LAN743X_CSR_FLAG_IS_B0;
 132		break;
 133	}
 134
 135	result = lan743x_csr_light_reset(adapter);
 136	if (result)
 137		goto clean_up;
 138	return 0;
 139clean_up:
 140	return result;
 141}
 142
 143static void lan743x_intr_software_isr(void *context)
 144{
 145	struct lan743x_adapter *adapter = context;
 146	struct lan743x_intr *intr = &adapter->intr;
 147	u32 int_sts;
 148
 149	int_sts = lan743x_csr_read(adapter, INT_STS);
 150	if (int_sts & INT_BIT_SW_GP_) {
 151		lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
 152		intr->software_isr_flag = 1;
 153	}
 154}
 155
 156static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
 157{
 158	struct lan743x_tx *tx = context;
 159	struct lan743x_adapter *adapter = tx->adapter;
 160	bool enable_flag = true;
 161	u32 int_en = 0;
 162
 163	int_en = lan743x_csr_read(adapter, INT_EN_SET);
 164	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 165		lan743x_csr_write(adapter, INT_EN_CLR,
 166				  INT_BIT_DMA_TX_(tx->channel_number));
 167	}
 168
 169	if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
 170		u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
 171		u32 dmac_int_sts;
 172		u32 dmac_int_en;
 173
 174		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 175			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 176		else
 177			dmac_int_sts = ioc_bit;
 178		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 179			dmac_int_en = lan743x_csr_read(adapter,
 180						       DMAC_INT_EN_SET);
 181		else
 182			dmac_int_en = ioc_bit;
 183
 184		dmac_int_en &= ioc_bit;
 185		dmac_int_sts &= dmac_int_en;
 186		if (dmac_int_sts & ioc_bit) {
 187			napi_schedule(&tx->napi);
 188			enable_flag = false;/* poll func will enable later */
 189		}
 190	}
 191
 192	if (enable_flag)
 193		/* enable isr */
 194		lan743x_csr_write(adapter, INT_EN_SET,
 195				  INT_BIT_DMA_TX_(tx->channel_number));
 196}
 197
 198static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
 199{
 200	struct lan743x_rx *rx = context;
 201	struct lan743x_adapter *adapter = rx->adapter;
 202	bool enable_flag = true;
 203
 204	if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
 205		lan743x_csr_write(adapter, INT_EN_CLR,
 206				  INT_BIT_DMA_RX_(rx->channel_number));
 207	}
 208
 209	if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
 210		u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
 211		u32 dmac_int_sts;
 212		u32 dmac_int_en;
 213
 214		if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
 215			dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
 216		else
 217			dmac_int_sts = rx_frame_bit;
 218		if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
 219			dmac_int_en = lan743x_csr_read(adapter,
 220						       DMAC_INT_EN_SET);
 221		else
 222			dmac_int_en = rx_frame_bit;
 223
 224		dmac_int_en &= rx_frame_bit;
 225		dmac_int_sts &= dmac_int_en;
 226		if (dmac_int_sts & rx_frame_bit) {
 227			napi_schedule(&rx->napi);
  228			enable_flag = false;/* poll func will enable later */
 229		}
 230	}
 231
 232	if (enable_flag) {
 233		/* enable isr */
 234		lan743x_csr_write(adapter, INT_EN_SET,
 235				  INT_BIT_DMA_RX_(rx->channel_number));
 236	}
 237}
 238
 239static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
 240{
 241	struct lan743x_adapter *adapter = context;
 242	unsigned int channel;
 243
 244	if (int_sts & INT_BIT_ALL_RX_) {
 245		for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
 246			channel++) {
 247			u32 int_bit = INT_BIT_DMA_RX_(channel);
 248
 249			if (int_sts & int_bit) {
 250				lan743x_rx_isr(&adapter->rx[channel],
 251					       int_bit, flags);
 252				int_sts &= ~int_bit;
 253			}
 254		}
 255	}
 256	if (int_sts & INT_BIT_ALL_TX_) {
 257		for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
 258			channel++) {
 259			u32 int_bit = INT_BIT_DMA_TX_(channel);
 260
 261			if (int_sts & int_bit) {
 262				lan743x_tx_isr(&adapter->tx[channel],
 263					       int_bit, flags);
 264				int_sts &= ~int_bit;
 265			}
 266		}
 267	}
 268	if (int_sts & INT_BIT_ALL_OTHER_) {
 269		if (int_sts & INT_BIT_SW_GP_) {
 270			lan743x_intr_software_isr(adapter);
 271			int_sts &= ~INT_BIT_SW_GP_;
 272		}
 273		if (int_sts & INT_BIT_1588_) {
 274			lan743x_ptp_isr(adapter);
 275			int_sts &= ~INT_BIT_1588_;
 276		}
 277	}
 278	if (int_sts)
 279		lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
 280}
 281
 282static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
 283{
 284	struct lan743x_vector *vector = ptr;
 285	struct lan743x_adapter *adapter = vector->adapter;
 286	irqreturn_t result = IRQ_NONE;
 287	u32 int_enables;
 288	u32 int_sts;
 289
 290	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
 291		int_sts = lan743x_csr_read(adapter, INT_STS);
 292	} else if (vector->flags &
 293		   (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
 294		   LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
 295		int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
 296	} else {
 297		/* use mask as implied status */
 298		int_sts = vector->int_mask | INT_BIT_MAS_;
 299	}
 300
 301	if (!(int_sts & INT_BIT_MAS_))
 302		goto irq_done;
 303
 304	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
 305		/* disable vector interrupt */
 306		lan743x_csr_write(adapter,
 307				  INT_VEC_EN_CLR,
 308				  INT_VEC_EN_(vector->vector_index));
 309
 310	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
 311		/* disable master interrupt */
 312		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 313
 314	if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
 315		int_enables = lan743x_csr_read(adapter, INT_EN_SET);
 316	} else {
 317		/*  use vector mask as implied enable mask */
 318		int_enables = vector->int_mask;
 319	}
 320
 321	int_sts &= int_enables;
 322	int_sts &= vector->int_mask;
 323	if (int_sts) {
 324		if (vector->handler) {
 325			vector->handler(vector->context,
 326					int_sts, vector->flags);
 327		} else {
 328			/* disable interrupts on this vector */
 329			lan743x_csr_write(adapter, INT_EN_CLR,
 330					  vector->int_mask);
 331		}
 332		result = IRQ_HANDLED;
 333	}
 334
 335	if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
 336		/* enable master interrupt */
 337		lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 338
 339	if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
 340		/* enable vector interrupt */
 341		lan743x_csr_write(adapter,
 342				  INT_VEC_EN_SET,
 343				  INT_VEC_EN_(vector->vector_index));
 344irq_done:
 345	return result;
 346}
 347
 348static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
 349{
 350	struct lan743x_intr *intr = &adapter->intr;
 351	int result = -ENODEV;
 352	int timeout = 10;
 353
 354	intr->software_isr_flag = 0;
 355
 356	/* enable interrupt */
 357	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
 358
 359	/* activate interrupt here */
 360	lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
 361	while ((timeout > 0) && (!(intr->software_isr_flag))) {
 362		usleep_range(1000, 20000);
 363		timeout--;
 364	}
 365
 366	if (intr->software_isr_flag)
 367		result = 0;
 368
 369	/* disable interrupts */
 370	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 371	return result;
 372}
 373
 374static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
 375				     int vector_index, u32 flags,
 376				     u32 int_mask,
 377				     lan743x_vector_handler handler,
 378				     void *context)
 379{
 380	struct lan743x_vector *vector = &adapter->intr.vector_list
 381					[vector_index];
 382	int ret;
 383
 384	vector->adapter = adapter;
 385	vector->flags = flags;
 386	vector->vector_index = vector_index;
 387	vector->int_mask = int_mask;
 388	vector->handler = handler;
 389	vector->context = context;
 390
 391	ret = request_irq(vector->irq,
 392			  lan743x_intr_entry_isr,
 393			  (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
 394			  IRQF_SHARED : 0, DRIVER_NAME, vector);
 395	if (ret) {
 396		vector->handler = NULL;
 397		vector->context = NULL;
 398		vector->int_mask = 0;
 399		vector->flags = 0;
 400	}
 401	return ret;
 402}
 403
 404static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
 405					int vector_index)
 406{
 407	struct lan743x_vector *vector = &adapter->intr.vector_list
 408					[vector_index];
 409
 410	free_irq(vector->irq, vector);
 411	vector->handler = NULL;
 412	vector->context = NULL;
 413	vector->int_mask = 0;
 414	vector->flags = 0;
 415}
 416
 417static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
 418					 u32 int_mask)
 419{
 420	int index;
 421
 422	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
 423		if (adapter->intr.vector_list[index].int_mask & int_mask)
 424			return adapter->intr.vector_list[index].flags;
 425	}
 426	return 0;
 427}
 428
 429static void lan743x_intr_close(struct lan743x_adapter *adapter)
 430{
 431	struct lan743x_intr *intr = &adapter->intr;
 432	int index = 0;
 433
 434	lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
 435	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
 436
 437	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
 438		if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
 439			lan743x_intr_unregister_isr(adapter, index);
 440			intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
 441		}
 442	}
 443
 444	if (intr->flags & INTR_FLAG_MSI_ENABLED) {
 445		pci_disable_msi(adapter->pdev);
 446		intr->flags &= ~INTR_FLAG_MSI_ENABLED;
 447	}
 448
 449	if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
 450		pci_disable_msix(adapter->pdev);
 451		intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
 452	}
 453}
 454
 455static int lan743x_intr_open(struct lan743x_adapter *adapter)
 456{
 457	struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
 458	struct lan743x_intr *intr = &adapter->intr;
 459	u32 int_vec_en_auto_clr = 0;
 460	u32 int_vec_map0 = 0;
 461	u32 int_vec_map1 = 0;
 462	int ret = -ENODEV;
 463	int index = 0;
 464	u32 flags = 0;
 465
 466	intr->number_of_vectors = 0;
 467
 468	/* Try to set up MSIX interrupts */
 469	memset(&msix_entries[0], 0,
 470	       sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
 471	for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
 472		msix_entries[index].entry = index;
 473	ret = pci_enable_msix_range(adapter->pdev,
 474				    msix_entries, 1,
 475				    1 + LAN743X_USED_TX_CHANNELS +
 476				    LAN743X_USED_RX_CHANNELS);
 477
 478	if (ret > 0) {
 479		intr->flags |= INTR_FLAG_MSIX_ENABLED;
 480		intr->number_of_vectors = ret;
 481		intr->using_vectors = true;
 482		for (index = 0; index < intr->number_of_vectors; index++)
 483			intr->vector_list[index].irq = msix_entries
 484						       [index].vector;
 485		netif_info(adapter, ifup, adapter->netdev,
 486			   "using MSIX interrupts, number of vectors = %d\n",
 487			   intr->number_of_vectors);
 488	}
 489
  490	/* If MSIX failed, try to set up MSI interrupts */
 491	if (!intr->number_of_vectors) {
 492		if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 493			if (!pci_enable_msi(adapter->pdev)) {
 494				intr->flags |= INTR_FLAG_MSI_ENABLED;
 495				intr->number_of_vectors = 1;
 496				intr->using_vectors = true;
 497				intr->vector_list[0].irq =
 498					adapter->pdev->irq;
 499				netif_info(adapter, ifup, adapter->netdev,
 500					   "using MSI interrupts, number of vectors = %d\n",
 501					   intr->number_of_vectors);
 502			}
 503		}
 504	}
 505
  506	/* If both MSIX and MSI failed, set up a legacy interrupt */
 507	if (!intr->number_of_vectors) {
 508		intr->number_of_vectors = 1;
 509		intr->using_vectors = false;
 510		intr->vector_list[0].irq = intr->irq;
 511		netif_info(adapter, ifup, adapter->netdev,
 512			   "using legacy interrupts\n");
 513	}
 514
 515	/* At this point we must have at least one irq */
 516	lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
 517
 518	/* map all interrupts to vector 0 */
 519	lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
 520	lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
 521	lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
 522	flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 523		LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 524		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 525		LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 526
 527	if (intr->using_vectors) {
 528		flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 529			 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 530	} else {
 531		flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
 532			 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
 533			 LAN743X_VECTOR_FLAG_IRQ_SHARED;
 534	}
 535
 536	if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 537		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
 538		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
 539		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
 540		flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
 541		flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
 542		flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
 543	}
 544
 545	ret = lan743x_intr_register_isr(adapter, 0, flags,
 546					INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
 547					INT_BIT_ALL_OTHER_,
 548					lan743x_intr_shared_isr, adapter);
 549	if (ret)
 550		goto clean_up;
 551	intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
 552
 553	if (intr->using_vectors)
 554		lan743x_csr_write(adapter, INT_VEC_EN_SET,
 555				  INT_VEC_EN_(0));
 556
 557	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
 558		lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
 559		lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
 560		lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
 561		lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
 562		lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
 563		lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
 564		lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
 565		lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
 566		lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
 567		lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
 568		lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
 569	}
 570
 571	/* enable interrupts */
 572	lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
 573	ret = lan743x_intr_test_isr(adapter);
 574	if (ret)
 575		goto clean_up;
 576
 577	if (intr->number_of_vectors > 1) {
 578		int number_of_tx_vectors = intr->number_of_vectors - 1;
 579
 580		if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
 581			number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
 582		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 583			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 584			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 585			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 586			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 587			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 588
 589		if (adapter->csr.flags &
 590		   LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 591			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 592				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 593				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 594				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 595		}
 596
 597		for (index = 0; index < number_of_tx_vectors; index++) {
 598			u32 int_bit = INT_BIT_DMA_TX_(index);
 599			int vector = index + 1;
 600
 601			/* map TX interrupt to vector */
 602			int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
 603			lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
 604
 605			/* Remove TX interrupt from shared mask */
 606			intr->vector_list[0].int_mask &= ~int_bit;
 607			ret = lan743x_intr_register_isr(adapter, vector, flags,
 608							int_bit, lan743x_tx_isr,
 609							&adapter->tx[index]);
 610			if (ret)
 611				goto clean_up;
 612			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 613			if (!(flags &
 614			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
 615				lan743x_csr_write(adapter, INT_VEC_EN_SET,
 616						  INT_VEC_EN_(vector));
 617		}
 618	}
 619	if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
 620		int number_of_rx_vectors = intr->number_of_vectors -
 621					   LAN743X_USED_TX_CHANNELS - 1;
 622
 623		if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
 624			number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
 625
 626		flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
 627			LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
 628			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
 629			LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
 630			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
 631			LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
 632
 633		if (adapter->csr.flags &
 634		    LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
 635			flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
 636				LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
 637				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
 638				LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
 639				LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
 640		}
 641		for (index = 0; index < number_of_rx_vectors; index++) {
 642			int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
 643			u32 int_bit = INT_BIT_DMA_RX_(index);
 644
 645			/* map RX interrupt to vector */
 646			int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
 647			lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
 648			if (flags &
 649			    LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
 650				int_vec_en_auto_clr |= INT_VEC_EN_(vector);
 651				lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
 652						  int_vec_en_auto_clr);
 653			}
 654
 655			/* Remove RX interrupt from shared mask */
 656			intr->vector_list[0].int_mask &= ~int_bit;
 657			ret = lan743x_intr_register_isr(adapter, vector, flags,
 658							int_bit, lan743x_rx_isr,
 659							&adapter->rx[index]);
 660			if (ret)
 661				goto clean_up;
 662			intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
 663
 664			lan743x_csr_write(adapter, INT_VEC_EN_SET,
 665					  INT_VEC_EN_(vector));
 666		}
 667	}
 668	return 0;
 669
 670clean_up:
 671	lan743x_intr_close(adapter);
 672	return ret;
 673}
 674
 675static int lan743x_dp_write(struct lan743x_adapter *adapter,
 676			    u32 select, u32 addr, u32 length, u32 *buf)
 677{
 678	int ret = -EIO;
 679	u32 dp_sel;
 680	int i;
 681
 682	mutex_lock(&adapter->dp_lock);
 683	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
 684				     1, 40, 100, 100))
 685		goto unlock;
 686	dp_sel = lan743x_csr_read(adapter, DP_SEL);
 687	dp_sel &= ~DP_SEL_MASK_;
 688	dp_sel |= select;
 689	lan743x_csr_write(adapter, DP_SEL, dp_sel);
 690
 691	for (i = 0; i < length; i++) {
 692		lan743x_csr_write(adapter, DP_ADDR, addr + i);
 693		lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
 694		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
 695		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
 696					     1, 40, 100, 100))
 697			goto unlock;
 698	}
 699	ret = 0;
 700
 701unlock:
 702	mutex_unlock(&adapter->dp_lock);
 703	return ret;
 704}
 705
 706static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
 707{
 708	u32 ret;
 709
 710	ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
 711		MAC_MII_ACC_PHY_ADDR_MASK_;
 712	ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
 713		MAC_MII_ACC_MIIRINDA_MASK_;
 714
 715	if (read)
 716		ret |= MAC_MII_ACC_MII_READ_;
 717	else
 718		ret |= MAC_MII_ACC_MII_WRITE_;
 719	ret |= MAC_MII_ACC_MII_BUSY_;
 720
 721	return ret;
 722}
 723
 724static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
 725{
 726	u32 data;
 727
 728	return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
 729				  !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
 730}
 731
 732static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
 733{
 734	struct lan743x_adapter *adapter = bus->priv;
 735	u32 val, mii_access;
 736	int ret;
 737
  738	/* confirm MII not busy */
 739	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 740	if (ret < 0)
 741		return ret;
 742
 743	/* set the address, index & direction (read from PHY) */
 744	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
 745	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 746	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 747	if (ret < 0)
 748		return ret;
 749
 750	val = lan743x_csr_read(adapter, MAC_MII_DATA);
 751	return (int)(val & 0xFFFF);
 752}
 753
 754static int lan743x_mdiobus_write(struct mii_bus *bus,
 755				 int phy_id, int index, u16 regval)
 756{
 757	struct lan743x_adapter *adapter = bus->priv;
 758	u32 val, mii_access;
 759	int ret;
 760
 761	/* confirm MII not busy */
 762	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 763	if (ret < 0)
 764		return ret;
 765	val = (u32)regval;
 766	lan743x_csr_write(adapter, MAC_MII_DATA, val);
 767
 768	/* set the address, index & direction (write to PHY) */
 769	mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
 770	lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
 771	ret = lan743x_mac_mii_wait_till_not_busy(adapter);
 772	return ret;
 773}
 774
 775static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
 776				    u8 *addr)
 777{
 778	u32 addr_lo, addr_hi;
 779
 780	addr_lo = addr[0] |
 781		addr[1] << 8 |
 782		addr[2] << 16 |
 783		addr[3] << 24;
 784	addr_hi = addr[4] |
 785		addr[5] << 8;
 786	lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
 787	lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
 788
 789	ether_addr_copy(adapter->mac_address, addr);
 790	netif_info(adapter, drv, adapter->netdev,
 791		   "MAC address set to %pM\n", addr);
 792}
 793
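As a worked example of the packing above, take the hypothetical address 00:11:22:33:44:55: addr_lo = 0x00 | (0x11 << 8) | (0x22 << 16) | (0x33 << 24) = 0x33221100 and addr_hi = 0x44 | (0x55 << 8) = 0x00005544, so the first four octets land in MAC_RX_ADDRL and the last two in MAC_RX_ADDRH.
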
 794static int lan743x_mac_init(struct lan743x_adapter *adapter)
 795{
 796	bool mac_address_valid = true;
 797	struct net_device *netdev;
 798	u32 mac_addr_hi = 0;
 799	u32 mac_addr_lo = 0;
 800	u32 data;
 801
 802	netdev = adapter->netdev;
 803
  804	/* Disable auto duplex and speed detection; phylib handles that */
 805	data = lan743x_csr_read(adapter, MAC_CR);
 806	data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
 807	data |= MAC_CR_CNTR_RST_;
 808	lan743x_csr_write(adapter, MAC_CR, data);
 809
 810	if (!is_valid_ether_addr(adapter->mac_address)) {
 811		mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
 812		mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
 813		adapter->mac_address[0] = mac_addr_lo & 0xFF;
 814		adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
 815		adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
 816		adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
 817		adapter->mac_address[4] = mac_addr_hi & 0xFF;
 818		adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
 819
 820		if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
 821		    mac_addr_lo == 0xFFFFFFFF) {
 822			mac_address_valid = false;
 823		} else if (!is_valid_ether_addr(adapter->mac_address)) {
 824			mac_address_valid = false;
 825		}
 826
 827		if (!mac_address_valid)
 828			eth_random_addr(adapter->mac_address);
 829	}
 830	lan743x_mac_set_address(adapter, adapter->mac_address);
 831	ether_addr_copy(netdev->dev_addr, adapter->mac_address);
 832
 833	return 0;
 834}
 835
 836static int lan743x_mac_open(struct lan743x_adapter *adapter)
 837{
 838	int ret = 0;
 839	u32 temp;
 840
 841	temp = lan743x_csr_read(adapter, MAC_RX);
 842	lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
 843	temp = lan743x_csr_read(adapter, MAC_TX);
 844	lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
 845	return ret;
 846}
 847
 848static void lan743x_mac_close(struct lan743x_adapter *adapter)
 849{
 850	u32 temp;
 851
 852	temp = lan743x_csr_read(adapter, MAC_TX);
 853	temp &= ~MAC_TX_TXEN_;
 854	lan743x_csr_write(adapter, MAC_TX, temp);
 855	lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
 856				 1, 1000, 20000, 100);
 857
 858	temp = lan743x_csr_read(adapter, MAC_RX);
 859	temp &= ~MAC_RX_RXEN_;
 860	lan743x_csr_write(adapter, MAC_RX, temp);
 861	lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
 862				 1, 1000, 20000, 100);
 863}
 864
 865static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
 866					      bool tx_enable, bool rx_enable)
 867{
 868	u32 flow_setting = 0;
 869
  870	/* Set the maximum pause time; once FIFO space frees up, a
  871	 * zero-value pause frame is sent to release the pause.
  872	 */
 873	flow_setting = MAC_FLOW_CR_FCPT_MASK_;
 874	if (tx_enable)
 875		flow_setting |= MAC_FLOW_CR_TX_FCEN_;
 876	if (rx_enable)
 877		flow_setting |= MAC_FLOW_CR_RX_FCEN_;
 878	lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
 879}
 880
 881static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
 882{
 883	int enabled = 0;
 884	u32 mac_rx = 0;
 885
 886	mac_rx = lan743x_csr_read(adapter, MAC_RX);
 887	if (mac_rx & MAC_RX_RXEN_) {
 888		enabled = 1;
 889		if (mac_rx & MAC_RX_RXD_) {
 890			lan743x_csr_write(adapter, MAC_RX, mac_rx);
 891			mac_rx &= ~MAC_RX_RXD_;
 892		}
 893		mac_rx &= ~MAC_RX_RXEN_;
 894		lan743x_csr_write(adapter, MAC_RX, mac_rx);
 895		lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
 896					 1, 1000, 20000, 100);
 897		lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
 898	}
 899
 900	mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
 901	mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
 902		  MAC_RX_MAX_SIZE_MASK_);
 903	lan743x_csr_write(adapter, MAC_RX, mac_rx);
 904
 905	if (enabled) {
 906		mac_rx |= MAC_RX_RXEN_;
 907		lan743x_csr_write(adapter, MAC_RX, mac_rx);
 908	}
 909	return 0;
 910}
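
As a worked example of the size programmed above: with the standard MTU of 1500, the RX maximum frame size becomes new_mtu + ETH_HLEN + 4 = 1500 + 14 + 4 = 1518 bytes, i.e. the payload plus the 14-byte Ethernet header plus what is presumably the 4-byte FCS allowance.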
 911
 912/* PHY */
 913static int lan743x_phy_reset(struct lan743x_adapter *adapter)
 914{
 915	u32 data;
 916
  917	/* Only called within probe, and before mdiobus_register */
 918
 919	data = lan743x_csr_read(adapter, PMT_CTL);
 920	data |= PMT_CTL_ETH_PHY_RST_;
 921	lan743x_csr_write(adapter, PMT_CTL, data);
 922
 923	return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
 924				  (!(data & PMT_CTL_ETH_PHY_RST_) &&
 925				  (data & PMT_CTL_READY_)),
 926				  50000, 1000000);
 927}
 928
 929static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
 930					   u8 duplex, u16 local_adv,
 931					   u16 remote_adv)
 932{
 933	struct lan743x_phy *phy = &adapter->phy;
 934	u8 cap;
 935
 936	if (phy->fc_autoneg)
 937		cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
 938	else
 939		cap = phy->fc_request_control;
 940
 941	lan743x_mac_flow_ctrl_set_enables(adapter,
 942					  cap & FLOW_CTRL_TX,
 943					  cap & FLOW_CTRL_RX);
 944}
 945
 946static int lan743x_phy_init(struct lan743x_adapter *adapter)
 947{
 948	return lan743x_phy_reset(adapter);
 949}
 950
 951static void lan743x_phy_link_status_change(struct net_device *netdev)
 952{
 953	struct lan743x_adapter *adapter = netdev_priv(netdev);
 954	struct phy_device *phydev = netdev->phydev;
 955	u32 data;
 956
 957	phy_print_status(phydev);
 958	if (phydev->state == PHY_RUNNING) {
 959		struct ethtool_link_ksettings ksettings;
 960		int remote_advertisement = 0;
 961		int local_advertisement = 0;
 962
 963		data = lan743x_csr_read(adapter, MAC_CR);
 964
 965		/* set interface mode */
 966		if (phy_interface_mode_is_rgmii(adapter->phy_mode))
 967			/* RGMII */
 968			data &= ~MAC_CR_MII_EN_;
 969		else
 970			/* GMII */
 971			data |= MAC_CR_MII_EN_;
 972
 973		/* set duplex mode */
 974		if (phydev->duplex)
 975			data |= MAC_CR_DPX_;
 976		else
 977			data &= ~MAC_CR_DPX_;
 978
 979		/* set bus speed */
 980		switch (phydev->speed) {
 981		case SPEED_10:
 982			data &= ~MAC_CR_CFG_H_;
 983			data &= ~MAC_CR_CFG_L_;
 984		break;
 985		case SPEED_100:
 986			data &= ~MAC_CR_CFG_H_;
 987			data |= MAC_CR_CFG_L_;
 988		break;
 989		case SPEED_1000:
 990			data |= MAC_CR_CFG_H_;
 991			data &= ~MAC_CR_CFG_L_;
 992		break;
 993		}
 994		lan743x_csr_write(adapter, MAC_CR, data);
 995
 996		memset(&ksettings, 0, sizeof(ksettings));
 997		phy_ethtool_get_link_ksettings(netdev, &ksettings);
 998		local_advertisement =
 999			linkmode_adv_to_mii_adv_t(phydev->advertising);
1000		remote_advertisement =
1001			linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
1002
1003		lan743x_phy_update_flowcontrol(adapter,
1004					       ksettings.base.duplex,
1005					       local_advertisement,
1006					       remote_advertisement);
1007		lan743x_ptp_update_latency(adapter, ksettings.base.speed);
1008	}
1009}
1010
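
For quick reference, the MAC_CR speed bits set in the switch above map as follows (read straight off the cases shown):

/* SPEED_10   -> MAC_CR_CFG_H_ = 0, MAC_CR_CFG_L_ = 0
 * SPEED_100  -> MAC_CR_CFG_H_ = 0, MAC_CR_CFG_L_ = 1
 * SPEED_1000 -> MAC_CR_CFG_H_ = 1, MAC_CR_CFG_L_ = 0
 */
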
1011static void lan743x_phy_close(struct lan743x_adapter *adapter)
1012{
1013	struct net_device *netdev = adapter->netdev;
1014
1015	phy_stop(netdev->phydev);
1016	phy_disconnect(netdev->phydev);
1017	netdev->phydev = NULL;
1018}
1019
1020static int lan743x_phy_open(struct lan743x_adapter *adapter)
1021{
1022	struct lan743x_phy *phy = &adapter->phy;
1023	struct device_node *phynode;
1024	struct phy_device *phydev;
1025	struct net_device *netdev;
1026	int ret = -EIO;
1027
1028	netdev = adapter->netdev;
1029	phynode = of_node_get(adapter->pdev->dev.of_node);
1030	adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
1031
1032	if (phynode) {
1033		of_get_phy_mode(phynode, &adapter->phy_mode);
1034
1035		if (of_phy_is_fixed_link(phynode)) {
1036			ret = of_phy_register_fixed_link(phynode);
1037			if (ret) {
1038				netdev_err(netdev,
1039					   "cannot register fixed PHY\n");
1040				of_node_put(phynode);
1041				goto return_error;
1042			}
1043		}
1044		phydev = of_phy_connect(netdev, phynode,
1045					lan743x_phy_link_status_change, 0,
1046					adapter->phy_mode);
1047		of_node_put(phynode);
1048		if (!phydev)
1049			goto return_error;
1050	} else {
1051		phydev = phy_find_first(adapter->mdiobus);
1052		if (!phydev)
1053			goto return_error;
1054
1055		ret = phy_connect_direct(netdev, phydev,
1056					 lan743x_phy_link_status_change,
1057					 adapter->phy_mode);
1058		if (ret)
1059			goto return_error;
1060	}
1061
1062	/* MAC doesn't support 1000T Half */
1063	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1064
1065	/* support both flow controls */
1066	phy_support_asym_pause(phydev);
1067	phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1068	phy->fc_autoneg = phydev->autoneg;
1069
1070	phy_start(phydev);
1071	phy_start_aneg(phydev);
1072	return 0;
1073
1074return_error:
1075	return ret;
1076}
1077
1078static void lan743x_rfe_open(struct lan743x_adapter *adapter)
1079{
1080	lan743x_csr_write(adapter, RFE_RSS_CFG,
1081		RFE_RSS_CFG_UDP_IPV6_EX_ |
1082		RFE_RSS_CFG_TCP_IPV6_EX_ |
1083		RFE_RSS_CFG_IPV6_EX_ |
1084		RFE_RSS_CFG_UDP_IPV6_ |
1085		RFE_RSS_CFG_TCP_IPV6_ |
1086		RFE_RSS_CFG_IPV6_ |
1087		RFE_RSS_CFG_UDP_IPV4_ |
1088		RFE_RSS_CFG_TCP_IPV4_ |
1089		RFE_RSS_CFG_IPV4_ |
1090		RFE_RSS_CFG_VALID_HASH_BITS_ |
1091		RFE_RSS_CFG_RSS_QUEUE_ENABLE_ |
1092		RFE_RSS_CFG_RSS_HASH_STORE_ |
1093		RFE_RSS_CFG_RSS_ENABLE_);
1094}
1095
1096static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1097{
1098	u8 *mac_addr;
1099	u32 mac_addr_hi = 0;
1100	u32 mac_addr_lo = 0;
1101
 1102	/* Add the MAC address to the perfect filter */
1103	mac_addr = adapter->mac_address;
1104	mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1105		      (((u32)(mac_addr[1])) << 8) |
1106		      (((u32)(mac_addr[2])) << 16) |
1107		      (((u32)(mac_addr[3])) << 24));
1108	mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1109		      (((u32)(mac_addr[5])) << 8));
1110
1111	lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1112	lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1113			  mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1114}
1115
1116static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
1117{
1118	struct net_device *netdev = adapter->netdev;
1119	u32 hash_table[DP_SEL_VHF_HASH_LEN];
1120	u32 rfctl;
1121	u32 data;
1122
1123	rfctl = lan743x_csr_read(adapter, RFE_CTL);
1124	rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
1125		 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1126	rfctl |= RFE_CTL_AB_;
1127	if (netdev->flags & IFF_PROMISC) {
1128		rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
1129	} else {
1130		if (netdev->flags & IFF_ALLMULTI)
1131			rfctl |= RFE_CTL_AM_;
1132	}
1133
1134	memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
1135	if (netdev_mc_count(netdev)) {
1136		struct netdev_hw_addr *ha;
1137		int i;
1138
1139		rfctl |= RFE_CTL_DA_PERFECT_;
1140		i = 1;
1141		netdev_for_each_mc_addr(ha, netdev) {
1142			/* set first 32 into Perfect Filter */
1143			if (i < 33) {
1144				lan743x_csr_write(adapter,
1145						  RFE_ADDR_FILT_HI(i), 0);
1146				data = ha->addr[3];
1147				data = ha->addr[2] | (data << 8);
1148				data = ha->addr[1] | (data << 8);
1149				data = ha->addr[0] | (data << 8);
1150				lan743x_csr_write(adapter,
1151						  RFE_ADDR_FILT_LO(i), data);
1152				data = ha->addr[5];
1153				data = ha->addr[4] | (data << 8);
1154				data |= RFE_ADDR_FILT_HI_VALID_;
1155				lan743x_csr_write(adapter,
1156						  RFE_ADDR_FILT_HI(i), data);
1157			} else {
1158				u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
1159					     23) & 0x1FF;
1160				hash_table[bitnum / 32] |= (1 << (bitnum % 32));
1161				rfctl |= RFE_CTL_MCAST_HASH_;
1162			}
1163			i++;
1164		}
1165	}
1166
1167	lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
1168			 DP_SEL_VHF_VLAN_LEN,
1169			 DP_SEL_VHF_HASH_LEN, hash_table);
1170	lan743x_csr_write(adapter, RFE_CTL, rfctl);
1171}
1172
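Multicast addresses beyond the first 32 perfect-filter slots fall back to the hash filter above: bits 31:23 of the ether_crc() value select one of 512 bits, stored across the hash_table words that are then pushed into RFE RAM. A minimal helper expressing just that index math could look like the sketch below (lan743x_mcast_hash_set_bit is a hypothetical name, not a driver function):

/* Hypothetical helper mirroring the bitnum computation above: map an
 * ether_crc() value to its bit in the 512-bit multicast hash table.
 */
static inline void lan743x_mcast_hash_set_bit(u32 *hash_table, u32 crc)
{
	u32 bitnum = (crc >> 23) & 0x1FF;	/* 9 bits -> 0..511 */

	hash_table[bitnum / 32] |= 1U << (bitnum % 32);
}
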
1173static int lan743x_dmac_init(struct lan743x_adapter *adapter)
1174{
1175	u32 data = 0;
1176
1177	lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
1178	lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
1179				 0, 1000, 20000, 100);
1180	switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
1181	case DMA_DESCRIPTOR_SPACING_16:
1182		data = DMAC_CFG_MAX_DSPACE_16_;
1183		break;
1184	case DMA_DESCRIPTOR_SPACING_32:
1185		data = DMAC_CFG_MAX_DSPACE_32_;
1186		break;
1187	case DMA_DESCRIPTOR_SPACING_64:
1188		data = DMAC_CFG_MAX_DSPACE_64_;
1189		break;
1190	case DMA_DESCRIPTOR_SPACING_128:
1191		data = DMAC_CFG_MAX_DSPACE_128_;
1192		break;
1193	default:
1194		return -EPERM;
1195	}
1196	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1197		data |= DMAC_CFG_COAL_EN_;
1198	data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
1199	data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
1200	lan743x_csr_write(adapter, DMAC_CFG, data);
1201	data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
1202	data |= DMAC_COAL_CFG_TIMER_TX_START_;
1203	data |= DMAC_COAL_CFG_FLUSH_INTS_;
1204	data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
1205	data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
1206	data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
1207	data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
1208	lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
1209	data = DMAC_OBFF_TX_THRES_SET_(0x08);
1210	data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
1211	lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
1212	return 0;
1213}
1214
1215static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1216				     int tx_channel)
1217{
1218	u32 dmac_cmd = 0;
1219
1220	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1221	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1222				      DMAC_CMD_START_T_(tx_channel)),
1223				      (dmac_cmd &
1224				      DMAC_CMD_STOP_T_(tx_channel)));
1225}
1226
1227static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1228					     int tx_channel)
1229{
1230	int timeout = 100;
1231	int result = 0;
1232
1233	while (timeout &&
1234	       ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1235	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1236		usleep_range(1000, 20000);
1237		timeout--;
1238	}
1239	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1240		result = -ENODEV;
1241	return result;
1242}
1243
1244static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1245				     int rx_channel)
1246{
1247	u32 dmac_cmd = 0;
1248
1249	dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1250	return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1251				      DMAC_CMD_START_R_(rx_channel)),
1252				      (dmac_cmd &
1253				      DMAC_CMD_STOP_R_(rx_channel)));
1254}
1255
1256static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1257					     int rx_channel)
1258{
1259	int timeout = 100;
1260	int result = 0;
1261
1262	while (timeout &&
1263	       ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1264	       DMAC_CHANNEL_STATE_STOP_PENDING)) {
1265		usleep_range(1000, 20000);
1266		timeout--;
1267	}
1268	if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1269		result = -ENODEV;
1270	return result;
1271}
1272
1273static void lan743x_tx_release_desc(struct lan743x_tx *tx,
1274				    int descriptor_index, bool cleanup)
1275{
1276	struct lan743x_tx_buffer_info *buffer_info = NULL;
1277	struct lan743x_tx_descriptor *descriptor = NULL;
1278	u32 descriptor_type = 0;
1279	bool ignore_sync;
1280
1281	descriptor = &tx->ring_cpu_ptr[descriptor_index];
1282	buffer_info = &tx->buffer_info[descriptor_index];
1283	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
1284		goto done;
1285
1286	descriptor_type = (descriptor->data0) &
1287			  TX_DESC_DATA0_DTYPE_MASK_;
1288	if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
1289		goto clean_up_data_descriptor;
1290	else
1291		goto clear_active;
1292
1293clean_up_data_descriptor:
1294	if (buffer_info->dma_ptr) {
1295		if (buffer_info->flags &
1296		    TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
1297			dma_unmap_page(&tx->adapter->pdev->dev,
1298				       buffer_info->dma_ptr,
1299				       buffer_info->buffer_length,
1300				       DMA_TO_DEVICE);
1301		} else {
1302			dma_unmap_single(&tx->adapter->pdev->dev,
1303					 buffer_info->dma_ptr,
1304					 buffer_info->buffer_length,
1305					 DMA_TO_DEVICE);
1306		}
1307		buffer_info->dma_ptr = 0;
1308		buffer_info->buffer_length = 0;
1309	}
1310	if (!buffer_info->skb)
1311		goto clear_active;
1312
1313	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
1314		dev_kfree_skb(buffer_info->skb);
1315		goto clear_skb;
1316	}
1317
1318	if (cleanup) {
1319		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
1320		dev_kfree_skb(buffer_info->skb);
1321	} else {
1322		ignore_sync = (buffer_info->flags &
1323			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
1324		lan743x_ptp_tx_timestamp_skb(tx->adapter,
1325					     buffer_info->skb, ignore_sync);
1326	}
1327
1328clear_skb:
1329	buffer_info->skb = NULL;
1330
1331clear_active:
1332	buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
1333
1334done:
1335	memset(buffer_info, 0, sizeof(*buffer_info));
1336	memset(descriptor, 0, sizeof(*descriptor));
1337}
1338
1339static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1340{
1341	return ((++index) % tx->ring_size);
1342}
1343
1344static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1345{
1346	while ((*tx->head_cpu_ptr) != (tx->last_head)) {
1347		lan743x_tx_release_desc(tx, tx->last_head, false);
1348		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1349	}
1350}
1351
1352static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1353{
1354	u32 original_head = 0;
1355
1356	original_head = tx->last_head;
1357	do {
1358		lan743x_tx_release_desc(tx, tx->last_head, true);
1359		tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1360	} while (tx->last_head != original_head);
1361	memset(tx->ring_cpu_ptr, 0,
1362	       sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1363	memset(tx->buffer_info, 0,
1364	       sizeof(*tx->buffer_info) * (tx->ring_size));
1365}
1366
1367static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1368				   struct sk_buff *skb)
1369{
1370	int result = 1; /* 1 for the main skb buffer */
1371	int nr_frags = 0;
1372
1373	if (skb_is_gso(skb))
1374		result++; /* requires an extension descriptor */
1375	nr_frags = skb_shinfo(skb)->nr_frags;
1376	result += nr_frags; /* 1 for each fragment buffer */
1377	return result;
1378}
1379
1380static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1381{
1382	int last_head = tx->last_head;
1383	int last_tail = tx->last_tail;
1384
1385	if (last_tail >= last_head)
1386		return tx->ring_size - last_tail + last_head - 1;
1387	else
1388		return last_head - last_tail - 1;
1389}
1390
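The available-descriptor math above reserves one slot so that head == tail unambiguously means an empty ring. Worked with a hypothetical ring_size of 64: head = 10 and tail = 50 gives 64 - 50 + 10 - 1 = 23 free descriptors, while head = 50 and tail = 10 gives 50 - 10 - 1 = 39.
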
1391void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
1392				      bool enable_timestamping,
1393				      bool enable_onestep_sync)
1394{
1395	if (enable_timestamping)
1396		tx->ts_flags |= TX_TS_FLAG_TIMESTAMPING_ENABLED;
1397	else
1398		tx->ts_flags &= ~TX_TS_FLAG_TIMESTAMPING_ENABLED;
1399	if (enable_onestep_sync)
1400		tx->ts_flags |= TX_TS_FLAG_ONE_STEP_SYNC;
1401	else
1402		tx->ts_flags &= ~TX_TS_FLAG_ONE_STEP_SYNC;
1403}
1404
1405static int lan743x_tx_frame_start(struct lan743x_tx *tx,
1406				  unsigned char *first_buffer,
1407				  unsigned int first_buffer_length,
1408				  unsigned int frame_length,
1409				  bool time_stamp,
1410				  bool check_sum)
1411{
1412	/* called only from within lan743x_tx_xmit_frame.
1413	 * assuming tx->ring_lock has already been acquired.
1414	 */
1415	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1416	struct lan743x_tx_buffer_info *buffer_info = NULL;
1417	struct lan743x_adapter *adapter = tx->adapter;
1418	struct device *dev = &adapter->pdev->dev;
1419	dma_addr_t dma_ptr;
1420
1421	tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
1422	tx->frame_first = tx->last_tail;
1423	tx->frame_tail = tx->frame_first;
1424
1425	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1426	buffer_info = &tx->buffer_info[tx->frame_tail];
1427	dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
1428				 DMA_TO_DEVICE);
1429	if (dma_mapping_error(dev, dma_ptr))
1430		return -ENOMEM;
1431
1432	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
1433	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
1434	tx_descriptor->data3 = (frame_length << 16) &
1435		TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
1436
1437	buffer_info->skb = NULL;
1438	buffer_info->dma_ptr = dma_ptr;
1439	buffer_info->buffer_length = first_buffer_length;
1440	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1441
1442	tx->frame_data0 = (first_buffer_length &
1443		TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1444		TX_DESC_DATA0_DTYPE_DATA_ |
1445		TX_DESC_DATA0_FS_ |
1446		TX_DESC_DATA0_FCS_;
1447	if (time_stamp)
1448		tx->frame_data0 |= TX_DESC_DATA0_TSE_;
1449
1450	if (check_sum)
1451		tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
1452				   TX_DESC_DATA0_IPE_ |
1453				   TX_DESC_DATA0_TPE_;
1454
 1455	/* data0 will be programmed in one of the other frame assembler functions */
1456	return 0;
1457}
1458
1459static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
1460				     unsigned int frame_length,
1461				     int nr_frags)
1462{
1463	/* called only from within lan743x_tx_xmit_frame.
1464	 * assuming tx->ring_lock has already been acquired.
1465	 */
1466	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1467	struct lan743x_tx_buffer_info *buffer_info = NULL;
1468
1469	/* wrap up previous descriptor */
1470	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
1471	if (nr_frags <= 0) {
1472		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1473		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1474	}
1475	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1476	tx_descriptor->data0 = tx->frame_data0;
1477
1478	/* move to next descriptor */
1479	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1480	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1481	buffer_info = &tx->buffer_info[tx->frame_tail];
1482
1483	/* add extension descriptor */
1484	tx_descriptor->data1 = 0;
1485	tx_descriptor->data2 = 0;
1486	tx_descriptor->data3 = 0;
1487
1488	buffer_info->skb = NULL;
1489	buffer_info->dma_ptr = 0;
1490	buffer_info->buffer_length = 0;
1491	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1492
1493	tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
1494			  TX_DESC_DATA0_DTYPE_EXT_ |
1495			  TX_DESC_DATA0_EXT_LSO_;
1496
 1497	/* data0 will be programmed in one of the other frame assembler functions */
1498}
1499
1500static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
1501					 const skb_frag_t *fragment,
1502					 unsigned int frame_length)
1503{
1504	/* called only from within lan743x_tx_xmit_frame
1505	 * assuming tx->ring_lock has already been acquired
1506	 */
1507	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1508	struct lan743x_tx_buffer_info *buffer_info = NULL;
1509	struct lan743x_adapter *adapter = tx->adapter;
1510	struct device *dev = &adapter->pdev->dev;
1511	unsigned int fragment_length = 0;
1512	dma_addr_t dma_ptr;
1513
1514	fragment_length = skb_frag_size(fragment);
1515	if (!fragment_length)
1516		return 0;
1517
1518	/* wrap up previous descriptor */
1519	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1520	tx_descriptor->data0 = tx->frame_data0;
1521
1522	/* move to next descriptor */
1523	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1524	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1525	buffer_info = &tx->buffer_info[tx->frame_tail];
1526	dma_ptr = skb_frag_dma_map(dev, fragment,
1527				   0, fragment_length,
1528				   DMA_TO_DEVICE);
1529	if (dma_mapping_error(dev, dma_ptr)) {
1530		int desc_index;
1531
1532		/* cleanup all previously setup descriptors */
1533		desc_index = tx->frame_first;
1534		while (desc_index != tx->frame_tail) {
1535			lan743x_tx_release_desc(tx, desc_index, true);
1536			desc_index = lan743x_tx_next_index(tx, desc_index);
1537		}
1538		dma_wmb();
1539		tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1540		tx->frame_first = 0;
1541		tx->frame_data0 = 0;
1542		tx->frame_tail = 0;
1543		return -ENOMEM;
1544	}
1545
1546	tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
1547	tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
1548	tx_descriptor->data3 = (frame_length << 16) &
1549			       TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
1550
1551	buffer_info->skb = NULL;
1552	buffer_info->dma_ptr = dma_ptr;
1553	buffer_info->buffer_length = fragment_length;
1554	buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1555	buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
1556
1557	tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1558			  TX_DESC_DATA0_DTYPE_DATA_ |
1559			  TX_DESC_DATA0_FCS_;
1560
1561	/* data0 will be programmed in one of the other frame assembler functions */
1562	return 0;
1563}
1564
1565static void lan743x_tx_frame_end(struct lan743x_tx *tx,
1566				 struct sk_buff *skb,
1567				 bool time_stamp,
1568				 bool ignore_sync)
1569{
1570	/* called only from within lan743x_tx_xmit_frame
1571	 * assuming tx->ring_lock has already been acquired
1572	 */
1573	struct lan743x_tx_descriptor *tx_descriptor = NULL;
1574	struct lan743x_tx_buffer_info *buffer_info = NULL;
1575	struct lan743x_adapter *adapter = tx->adapter;
1576	u32 tx_tail_flags = 0;
1577
1578	/* wrap up previous descriptor */
1579	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
1580	    TX_DESC_DATA0_DTYPE_DATA_) {
1581		tx->frame_data0 |= TX_DESC_DATA0_LS_;
1582		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1583	}
1584
1585	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1586	buffer_info = &tx->buffer_info[tx->frame_tail];
1587	buffer_info->skb = skb;
1588	if (time_stamp)
1589		buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
1590	if (ignore_sync)
1591		buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
1592
1593	tx_descriptor->data0 = tx->frame_data0;
1594	tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1595	tx->last_tail = tx->frame_tail;
1596
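	/* Make sure all descriptor writes above have reached coherent memory
	 * before the TX_TAIL write below hands the new tail to the DMA engine.
	 */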
1597	dma_wmb();
1598
1599	if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
1600		tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
1601	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
1602		tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
1603		TX_TAIL_SET_TOP_INT_EN_;
1604
1605	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1606			  tx_tail_flags | tx->frame_tail);
1607	tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1608}
1609
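/* lan743x_tx_xmit_frame always consumes the skb and returns NETDEV_TX_OK.
 * When too few descriptors are free, the skb is parked in tx->overflow_skb
 * and the queue is stopped; lan743x_tx_napi_poll retransmits it and wakes
 * the queue once enough completed descriptors have been released.
 */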
1610static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
1611					 struct sk_buff *skb)
1612{
1613	int required_number_of_descriptors = 0;
1614	unsigned int start_frame_length = 0;
1615	unsigned int frame_length = 0;
1616	unsigned int head_length = 0;
1617	unsigned long irq_flags = 0;
1618	bool do_timestamp = false;
1619	bool ignore_sync = false;
1620	int nr_frags = 0;
1621	bool gso = false;
1622	int j;
1623
1624	required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
1625
1626	spin_lock_irqsave(&tx->ring_lock, irq_flags);
1627	if (required_number_of_descriptors >
1628		lan743x_tx_get_avail_desc(tx)) {
1629		if (required_number_of_descriptors > (tx->ring_size - 1)) {
1630			dev_kfree_skb(skb);
1631		} else {
1632			/* save to overflow buffer */
1633			tx->overflow_skb = skb;
1634			netif_stop_queue(tx->adapter->netdev);
1635		}
1636		goto unlock;
1637	}
1638
1639	/* space available, transmit skb */
1640	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1641	    (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) &&
1642	    (lan743x_ptp_request_tx_timestamp(tx->adapter))) {
1643		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1644		do_timestamp = true;
1645		if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
1646			ignore_sync = true;
1647	}
1648	head_length = skb_headlen(skb);
1649	frame_length = skb_pagelen(skb);
1650	nr_frags = skb_shinfo(skb)->nr_frags;
1651	start_frame_length = frame_length;
1652	gso = skb_is_gso(skb);
1653	if (gso) {
1654		start_frame_length = max(skb_shinfo(skb)->gso_size,
1655					 (unsigned short)8);
1656	}
1657
1658	if (lan743x_tx_frame_start(tx,
1659				   skb->data, head_length,
1660				   start_frame_length,
1661				   do_timestamp,
1662				   skb->ip_summed == CHECKSUM_PARTIAL)) {
1663		dev_kfree_skb(skb);
1664		goto unlock;
1665	}
1666
1667	if (gso)
1668		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
1669
1670	if (nr_frags <= 0)
1671		goto finish;
1672
1673	for (j = 0; j < nr_frags; j++) {
1674		const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
1675
1676		if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
1677			/* upon error there is no need to call
1678			 *	lan743x_tx_frame_end;
1679			 * frame assembler cleanup was performed inside
1680			 *	lan743x_tx_frame_add_fragment
1681			 */
1682			dev_kfree_skb(skb);
1683			goto unlock;
1684		}
1685	}
1686
1687finish:
1688	lan743x_tx_frame_end(tx, skb, do_timestamp, ignore_sync);
1689
1690unlock:
1691	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1692	return NETDEV_TX_OK;
1693}
1694
1695static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1696{
1697	struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
1698	struct lan743x_adapter *adapter = tx->adapter;
1699	bool start_transmitter = false;
1700	unsigned long irq_flags = 0;
1701	u32 ioc_bit = 0;
1702	u32 int_sts = 0;
1703
1704	ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
1705	int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
1706	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
1707		lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
1708	spin_lock_irqsave(&tx->ring_lock, irq_flags);
1709
1710	/* clean up tx ring */
1711	lan743x_tx_release_completed_descriptors(tx);
1712	if (netif_queue_stopped(adapter->netdev)) {
1713		if (tx->overflow_skb) {
1714			if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
1715				lan743x_tx_get_avail_desc(tx))
1716				start_transmitter = true;
1717		} else {
1718			netif_wake_queue(adapter->netdev);
1719		}
1720	}
1721	spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1722
1723	if (start_transmitter) {
1724		/* space is now available, transmit overflow skb */
1725		lan743x_tx_xmit_frame(tx, tx->overflow_skb);
1726		tx->overflow_skb = NULL;
1727		netif_wake_queue(adapter->netdev);
1728	}
1729
1730	if (!napi_complete(napi))
1731		goto done;
1732
1733	/* enable isr */
1734	lan743x_csr_write(adapter, INT_EN_SET,
1735			  INT_BIT_DMA_TX_(tx->channel_number));
1736	lan743x_csr_read(adapter, INT_STS);
1737
1738done:
1739	return 0;
1740}
1741
1742static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
1743{
1744	if (tx->head_cpu_ptr) {
1745		dma_free_coherent(&tx->adapter->pdev->dev,
1746				  sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
1747				  tx->head_dma_ptr);
1748		tx->head_cpu_ptr = NULL;
1749		tx->head_dma_ptr = 0;
1750	}
1751	kfree(tx->buffer_info);
1752	tx->buffer_info = NULL;
1753
1754	if (tx->ring_cpu_ptr) {
1755		dma_free_coherent(&tx->adapter->pdev->dev,
1756				  tx->ring_allocation_size, tx->ring_cpu_ptr,
1757				  tx->ring_dma_ptr);
1758		tx->ring_allocation_size = 0;
1759		tx->ring_cpu_ptr = NULL;
1760		tx->ring_dma_ptr = 0;
1761	}
1762	tx->ring_size = 0;
1763}
1764
1765static int lan743x_tx_ring_init(struct lan743x_tx *tx)
1766{
1767	size_t ring_allocation_size = 0;
1768	void *cpu_ptr = NULL;
1769	dma_addr_t dma_ptr;
1770	int ret = -ENOMEM;
1771
1772	tx->ring_size = LAN743X_TX_RING_SIZE;
1773	if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
1774		ret = -EINVAL;
1775		goto cleanup;
1776	}
1777	ring_allocation_size = ALIGN(tx->ring_size *
1778				     sizeof(struct lan743x_tx_descriptor),
1779				     PAGE_SIZE);
1780	dma_ptr = 0;
1781	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
1782				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
1783	if (!cpu_ptr) {
1784		ret = -ENOMEM;
1785		goto cleanup;
1786	}
1787
1788	tx->ring_allocation_size = ring_allocation_size;
1789	tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
1790	tx->ring_dma_ptr = dma_ptr;
1791
1792	cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
1793	if (!cpu_ptr) {
1794		ret = -ENOMEM;
1795		goto cleanup;
1796	}
1797	tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
1798	dma_ptr = 0;
1799	cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
1800				     sizeof(*tx->head_cpu_ptr), &dma_ptr,
1801				     GFP_KERNEL);
1802	if (!cpu_ptr) {
1803		ret = -ENOMEM;
1804		goto cleanup;
1805	}
1806
1807	tx->head_cpu_ptr = cpu_ptr;
1808	tx->head_dma_ptr = dma_ptr;
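	/* The head writeback address is programmed into
	 * TX_HEAD_WRITEBACK_ADDRL/H in lan743x_tx_open, so reject a buffer
	 * that is not at least 4-byte aligned (dma_alloc_coherent normally
	 * returns well-aligned memory, making this check defensive).
	 */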
1809	if (tx->head_dma_ptr & 0x3) {
1810		ret = -ENOMEM;
1811		goto cleanup;
1812	}
1813
1814	return 0;
1815
1816cleanup:
1817	lan743x_tx_ring_cleanup(tx);
1818	return ret;
1819}
1820
1821static void lan743x_tx_close(struct lan743x_tx *tx)
1822{
1823	struct lan743x_adapter *adapter = tx->adapter;
1824
1825	lan743x_csr_write(adapter,
1826			  DMAC_CMD,
1827			  DMAC_CMD_STOP_T_(tx->channel_number));
1828	lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
1829
1830	lan743x_csr_write(adapter,
1831			  DMAC_INT_EN_CLR,
1832			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1833	lan743x_csr_write(adapter, INT_EN_CLR,
1834			  INT_BIT_DMA_TX_(tx->channel_number));
1835	napi_disable(&tx->napi);
1836	netif_napi_del(&tx->napi);
1837
1838	lan743x_csr_write(adapter, FCT_TX_CTL,
1839			  FCT_TX_CTL_DIS_(tx->channel_number));
1840	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1841				 FCT_TX_CTL_EN_(tx->channel_number),
1842				 0, 1000, 20000, 100);
1843
1844	lan743x_tx_release_all_descriptors(tx);
1845
1846	if (tx->overflow_skb) {
1847		dev_kfree_skb(tx->overflow_skb);
1848		tx->overflow_skb = NULL;
1849	}
1850
1851	lan743x_tx_ring_cleanup(tx);
1852}
1853
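/* Bring up one TX channel: reset and enable its FIFO, soft-reset the DMA
 * channel, program the ring base, ring length and head-writeback addresses,
 * register NAPI, configure the per-channel interrupt behaviour and finally
 * start the DMA channel.
 */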
1854static int lan743x_tx_open(struct lan743x_tx *tx)
1855{
1856	struct lan743x_adapter *adapter = NULL;
1857	u32 data = 0;
1858	int ret;
1859
1860	adapter = tx->adapter;
1861	ret = lan743x_tx_ring_init(tx);
1862	if (ret)
1863		return ret;
1864
1865	/* initialize fifo */
1866	lan743x_csr_write(adapter, FCT_TX_CTL,
1867			  FCT_TX_CTL_RESET_(tx->channel_number));
1868	lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1869				 FCT_TX_CTL_RESET_(tx->channel_number),
1870				 0, 1000, 20000, 100);
1871
1872	/* enable fifo */
1873	lan743x_csr_write(adapter, FCT_TX_CTL,
1874			  FCT_TX_CTL_EN_(tx->channel_number));
1875
1876	/* reset tx channel */
1877	lan743x_csr_write(adapter, DMAC_CMD,
1878			  DMAC_CMD_TX_SWR_(tx->channel_number));
1879	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
1880				 DMAC_CMD_TX_SWR_(tx->channel_number),
1881				 0, 1000, 20000, 100);
1882
1883	/* Write TX_BASE_ADDR */
1884	lan743x_csr_write(adapter,
1885			  TX_BASE_ADDRH(tx->channel_number),
1886			  DMA_ADDR_HIGH32(tx->ring_dma_ptr));
1887	lan743x_csr_write(adapter,
1888			  TX_BASE_ADDRL(tx->channel_number),
1889			  DMA_ADDR_LOW32(tx->ring_dma_ptr));
1890
1891	/* Write TX_CFG_B */
1892	data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
1893	data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
1894	data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
1895	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1896		data |= TX_CFG_B_TDMABL_512_;
1897	lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
1898
1899	/* Write TX_CFG_A */
1900	data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
1901	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
1902		data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
1903		data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
1904		data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
1905		data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
1906	}
1907	lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
1908
1909	/* Write TX_HEAD_WRITEBACK_ADDR */
1910	lan743x_csr_write(adapter,
1911			  TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
1912			  DMA_ADDR_HIGH32(tx->head_dma_ptr));
1913	lan743x_csr_write(adapter,
1914			  TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
1915			  DMA_ADDR_LOW32(tx->head_dma_ptr));
1916
1917	/* set last head */
1918	tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
1919
1920	/* write TX_TAIL */
1921	tx->last_tail = 0;
1922	lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1923			  (u32)(tx->last_tail));
1924	tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
1925							 INT_BIT_DMA_TX_
1926							 (tx->channel_number));
1927	netif_tx_napi_add(adapter->netdev,
1928			  &tx->napi, lan743x_tx_napi_poll,
1929			  tx->ring_size - 1);
1930	napi_enable(&tx->napi);
1931
1932	data = 0;
1933	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
1934		data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
1935	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
1936		data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
1937	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
1938		data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
1939	if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
1940		data |= TX_CFG_C_TX_INT_EN_R2C_;
1941	lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
1942
1943	if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
1944		lan743x_csr_write(adapter, INT_EN_SET,
1945				  INT_BIT_DMA_TX_(tx->channel_number));
1946	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
1947			  DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1948
1949	/* start dmac channel */
1950	lan743x_csr_write(adapter, DMAC_CMD,
1951			  DMAC_CMD_START_T_(tx->channel_number));
1952	return 0;
1953}
1954
1955static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
1956{
1957	return ((++index) % rx->ring_size);
1958}
1959
1960static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
1961{
1962	int length = 0;
1963
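	/* Buffers are sized for a maximum frame plus the Ethernet header,
	 * 4 extra bytes (room for a VLAN tag or trailing FCS;
	 * lan743x_rx_process_packet trims 4 bytes from the reported frame
	 * length) and RX_HEAD_PADDING bytes of head room, reserved via
	 * skb_reserve() in lan743x_rx_init_ring_element().
	 */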
1964	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
1965	return __netdev_alloc_skb(rx->adapter->netdev,
1966				  length, GFP_ATOMIC | GFP_DMA);
1967}
1968
1969static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
1970					struct sk_buff *skb)
1971{
1972	struct lan743x_rx_buffer_info *buffer_info;
1973	struct lan743x_rx_descriptor *descriptor;
1974	int length = 0;
1975
1976	length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
1977	descriptor = &rx->ring_cpu_ptr[index];
1978	buffer_info = &rx->buffer_info[index];
1979	buffer_info->skb = skb;
1980	if (!(buffer_info->skb))
1981		return -ENOMEM;
1982	buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
1983					      buffer_info->skb->data,
1984					      length,
1985					      DMA_FROM_DEVICE);
1986	if (dma_mapping_error(&rx->adapter->pdev->dev,
1987			      buffer_info->dma_ptr)) {
1988		buffer_info->dma_ptr = 0;
1989		return -ENOMEM;
1990	}
1991
1992	buffer_info->buffer_length = length;
1993	descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
1994	descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
1995	descriptor->data3 = 0;
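	/* Setting RX_DESC_DATA0_OWN_ hands the descriptor and its freshly
	 * mapped buffer to the hardware; the RX path only processes it again
	 * once the device has cleared that bit.
	 */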
1996	descriptor->data0 = (RX_DESC_DATA0_OWN_ |
1997			    (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
1998	skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
1999
2000	return 0;
2001}
2002
2003static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
2004{
2005	struct lan743x_rx_buffer_info *buffer_info;
2006	struct lan743x_rx_descriptor *descriptor;
2007
2008	descriptor = &rx->ring_cpu_ptr[index];
2009	buffer_info = &rx->buffer_info[index];
2010
2011	descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
2012	descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
2013	descriptor->data3 = 0;
2014	descriptor->data0 = (RX_DESC_DATA0_OWN_ |
2015			    ((buffer_info->buffer_length) &
2016			    RX_DESC_DATA0_BUF_LENGTH_MASK_));
2017}
2018
2019static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
2020{
2021	struct lan743x_rx_buffer_info *buffer_info;
2022	struct lan743x_rx_descriptor *descriptor;
2023
2024	descriptor = &rx->ring_cpu_ptr[index];
2025	buffer_info = &rx->buffer_info[index];
2026
2027	memset(descriptor, 0, sizeof(*descriptor));
2028
2029	if (buffer_info->dma_ptr) {
2030		dma_unmap_single(&rx->adapter->pdev->dev,
2031				 buffer_info->dma_ptr,
2032				 buffer_info->buffer_length,
2033				 DMA_FROM_DEVICE);
2034		buffer_info->dma_ptr = 0;
2035	}
2036
2037	if (buffer_info->skb) {
2038		dev_kfree_skb(buffer_info->skb);
2039		buffer_info->skb = NULL;
2040	}
2041
2042	memset(buffer_info, 0, sizeof(*buffer_info));
2043}
2044
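/* Process at most one received frame.  A frame starts at the descriptor
 * with RX_DESC_DATA0_FS_ set and ends at the one with RX_DESC_DATA0_LS_;
 * if RX_DESC_DATA0_EXT_ is set on that last descriptor, an extension
 * descriptor follows carrying the RX timestamp (seconds in data1,
 * nanoseconds in data2).  Returns one of the RX_PROCESS_RESULT_* codes.
 */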
2045static int lan743x_rx_process_packet(struct lan743x_rx *rx)
2046{
2047	struct skb_shared_hwtstamps *hwtstamps = NULL;
2048	int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
2049	int current_head_index = *rx->head_cpu_ptr;
2050	struct lan743x_rx_buffer_info *buffer_info;
2051	struct lan743x_rx_descriptor *descriptor;
2052	int extension_index = -1;
2053	int first_index = -1;
2054	int last_index = -1;
2055
2056	if (current_head_index < 0 || current_head_index >= rx->ring_size)
2057		goto done;
2058
2059	if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
2060		goto done;
2061
2062	if (rx->last_head != current_head_index) {
2063		descriptor = &rx->ring_cpu_ptr[rx->last_head];
2064		if (descriptor->data0 & RX_DESC_DATA0_OWN_)
2065			goto done;
2066
2067		if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
2068			goto done;
2069
2070		first_index = rx->last_head;
2071		if (descriptor->data0 & RX_DESC_DATA0_LS_) {
2072			last_index = rx->last_head;
2073		} else {
2074			int index;
2075
2076			index = lan743x_rx_next_index(rx, first_index);
2077			while (index != current_head_index) {
2078				descriptor = &rx->ring_cpu_ptr[index];
2079				if (descriptor->data0 & RX_DESC_DATA0_OWN_)
2080					goto done;
2081
2082				if (descriptor->data0 & RX_DESC_DATA0_LS_) {
2083					last_index = index;
2084					break;
2085				}
2086				index = lan743x_rx_next_index(rx, index);
2087			}
2088		}
2089		if (last_index >= 0) {
2090			descriptor = &rx->ring_cpu_ptr[last_index];
2091			if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
2092				/* extension is expected to follow */
2093				int index = lan743x_rx_next_index(rx,
2094								  last_index);
2095				if (index != current_head_index) {
2096					descriptor = &rx->ring_cpu_ptr[index];
2097					if (descriptor->data0 &
2098					    RX_DESC_DATA0_OWN_) {
2099						goto done;
2100					}
2101					if (descriptor->data0 &
2102					    RX_DESC_DATA0_EXT_) {
2103						extension_index = index;
2104					} else {
2105						goto done;
2106					}
2107				} else {
2108					/* extension is not yet available */
2109					/* prevent processing of this packet */
2110					first_index = -1;
2111					last_index = -1;
2112				}
2113			}
2114		}
2115	}
2116	if (first_index >= 0 && last_index >= 0) {
2117		int real_last_index = last_index;
2118		struct sk_buff *skb = NULL;
2119		u32 ts_sec = 0;
2120		u32 ts_nsec = 0;
2121
2122		/* packet is available */
2123		if (first_index == last_index) {
2124			/* single buffer packet */
2125			struct sk_buff *new_skb = NULL;
2126			int packet_length;
2127
2128			new_skb = lan743x_rx_allocate_skb(rx);
2129			if (!new_skb) {
2130				/* failed to allocate next skb.
2131				 * Memory is very low.
2132				 * Drop this packet and reuse buffer.
2133				 */
2134				lan743x_rx_reuse_ring_element(rx, first_index);
2135				goto process_extension;
2136			}
2137
2138			buffer_info = &rx->buffer_info[first_index];
2139			skb = buffer_info->skb;
2140			descriptor = &rx->ring_cpu_ptr[first_index];
2141
2142			/* unmap from dma */
2143			if (buffer_info->dma_ptr) {
2144				dma_unmap_single(&rx->adapter->pdev->dev,
2145						 buffer_info->dma_ptr,
2146						 buffer_info->buffer_length,
2147						 DMA_FROM_DEVICE);
2148				buffer_info->dma_ptr = 0;
2149				buffer_info->buffer_length = 0;
2150			}
2151			buffer_info->skb = NULL;
2152			packet_length =	RX_DESC_DATA0_FRAME_LENGTH_GET_
2153					(descriptor->data0);
2154			skb_put(skb, packet_length - 4);
2155			skb->protocol = eth_type_trans(skb,
2156						       rx->adapter->netdev);
2157			lan743x_rx_init_ring_element(rx, first_index, new_skb);
2158		} else {
2159			int index = first_index;
2160
2161			/* multi buffer packet not supported;
2162			 * this should not happen since buffers
2163			 * are allocated to be at least jumbo size
2164			 */
2165
2166			/* clean up buffers */
2167			if (first_index <= last_index) {
2168				while ((index >= first_index) &&
2169				       (index <= last_index)) {
2170					lan743x_rx_reuse_ring_element(rx,
2171								      index);
2172					index = lan743x_rx_next_index(rx,
2173								      index);
2174				}
2175			} else {
2176				while ((index >= first_index) ||
2177				       (index <= last_index)) {
2178					lan743x_rx_reuse_ring_element(rx,
2179								      index);
2180					index = lan743x_rx_next_index(rx,
2181								      index);
2182				}
2183			}
2184		}
2185
2186process_extension:
2187		if (extension_index >= 0) {
2188			descriptor = &rx->ring_cpu_ptr[extension_index];
2189			buffer_info = &rx->buffer_info[extension_index];
2190
2191			ts_sec = descriptor->data1;
2192			ts_nsec = (descriptor->data2 &
2193				  RX_DESC_DATA2_TS_NS_MASK_);
2194			lan743x_rx_reuse_ring_element(rx, extension_index);
2195			real_last_index = extension_index;
2196		}
2197
2198		if (!skb) {
2199			result = RX_PROCESS_RESULT_PACKET_DROPPED;
2200			goto move_forward;
2201		}
2202
2203		if (extension_index < 0)
2204			goto pass_packet_to_os;
2205		hwtstamps = skb_hwtstamps(skb);
2206		if (hwtstamps)
2207			hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);
2208
2209pass_packet_to_os:
2210		/* pass packet to OS */
2211		napi_gro_receive(&rx->napi, skb);
2212		result = RX_PROCESS_RESULT_PACKET_RECEIVED;
2213
2214move_forward:
2215		/* push tail and head forward */
2216		rx->last_tail = real_last_index;
2217		rx->last_head = lan743x_rx_next_index(rx, real_last_index);
2218	}
2219done:
2220	return result;
2221}
2222
2223static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
2224{
2225	struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
2226	struct lan743x_adapter *adapter = rx->adapter;
2227	u32 rx_tail_flags = 0;
2228	int count;
2229
2230	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
2231		/* clear int status bit before reading packet */
2232		lan743x_csr_write(adapter, DMAC_INT_STS,
2233				  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2234	}
2235	count = 0;
2236	while (count < weight) {
2237		int rx_process_result = lan743x_rx_process_packet(rx);
2238
2239		if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
2240			count++;
2241		} else if (rx_process_result ==
2242			RX_PROCESS_RESULT_NOTHING_TO_DO) {
2243			break;
2244		} else if (rx_process_result ==
2245			RX_PROCESS_RESULT_PACKET_DROPPED) {
2246			continue;
2247		}
2248	}
2249	rx->frame_count += count;
2250	if (count == weight)
2251		goto done;
2252
2253	if (!napi_complete_done(napi, count))
2254		goto done;
2255
2256	if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2257		rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
2258	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
2259		rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
2260	} else {
2261		lan743x_csr_write(adapter, INT_EN_SET,
2262				  INT_BIT_DMA_RX_(rx->channel_number));
2263	}
2264
2265	/* update RX_TAIL */
2266	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2267			  rx_tail_flags | rx->last_tail);
2268done:
2269	return count;
2270}
2271
2272static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
2273{
2274	if (rx->buffer_info && rx->ring_cpu_ptr) {
2275		int index;
2276
2277		for (index = 0; index < rx->ring_size; index++)
2278			lan743x_rx_release_ring_element(rx, index);
2279	}
2280
2281	if (rx->head_cpu_ptr) {
2282		dma_free_coherent(&rx->adapter->pdev->dev,
2283				  sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
2284				  rx->head_dma_ptr);
2285		rx->head_cpu_ptr = NULL;
2286		rx->head_dma_ptr = 0;
2287	}
2288
2289	kfree(rx->buffer_info);
2290	rx->buffer_info = NULL;
2291
2292	if (rx->ring_cpu_ptr) {
2293		dma_free_coherent(&rx->adapter->pdev->dev,
2294				  rx->ring_allocation_size, rx->ring_cpu_ptr,
2295				  rx->ring_dma_ptr);
2296		rx->ring_allocation_size = 0;
2297		rx->ring_cpu_ptr = NULL;
2298		rx->ring_dma_ptr = 0;
2299	}
2300
2301	rx->ring_size = 0;
2302	rx->last_head = 0;
2303}
2304
2305static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2306{
2307	size_t ring_allocation_size = 0;
2308	dma_addr_t dma_ptr = 0;
2309	void *cpu_ptr = NULL;
2310	int ret = -ENOMEM;
2311	int index = 0;
2312
2313	rx->ring_size = LAN743X_RX_RING_SIZE;
2314	if (rx->ring_size <= 1) {
2315		ret = -EINVAL;
2316		goto cleanup;
2317	}
2318	if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2319		ret = -EINVAL;
2320		goto cleanup;
2321	}
2322	ring_allocation_size = ALIGN(rx->ring_size *
2323				     sizeof(struct lan743x_rx_descriptor),
2324				     PAGE_SIZE);
2325	dma_ptr = 0;
2326	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2327				     ring_allocation_size, &dma_ptr, GFP_KERNEL);
2328	if (!cpu_ptr) {
2329		ret = -ENOMEM;
2330		goto cleanup;
2331	}
2332	rx->ring_allocation_size = ring_allocation_size;
2333	rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2334	rx->ring_dma_ptr = dma_ptr;
2335
2336	cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
2337			  GFP_KERNEL);
2338	if (!cpu_ptr) {
2339		ret = -ENOMEM;
2340		goto cleanup;
2341	}
2342	rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2343	dma_ptr = 0;
2344	cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
2345				     sizeof(*rx->head_cpu_ptr), &dma_ptr,
2346				     GFP_KERNEL);
2347	if (!cpu_ptr) {
2348		ret = -ENOMEM;
2349		goto cleanup;
2350	}
2351
2352	rx->head_cpu_ptr = cpu_ptr;
2353	rx->head_dma_ptr = dma_ptr;
2354	if (rx->head_dma_ptr & 0x3) {
2355		ret = -ENOMEM;
2356		goto cleanup;
2357	}
2358
2359	rx->last_head = 0;
2360	for (index = 0; index < rx->ring_size; index++) {
2361		struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
2362
2363		ret = lan743x_rx_init_ring_element(rx, index, new_skb);
2364		if (ret)
2365			goto cleanup;
2366	}
2367	return 0;
2368
2369cleanup:
2370	lan743x_rx_ring_cleanup(rx);
2371	return ret;
2372}
2373
2374static void lan743x_rx_close(struct lan743x_rx *rx)
2375{
2376	struct lan743x_adapter *adapter = rx->adapter;
2377
2378	lan743x_csr_write(adapter, FCT_RX_CTL,
2379			  FCT_RX_CTL_DIS_(rx->channel_number));
2380	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2381				 FCT_RX_CTL_EN_(rx->channel_number),
2382				 0, 1000, 20000, 100);
2383
2384	lan743x_csr_write(adapter, DMAC_CMD,
2385			  DMAC_CMD_STOP_R_(rx->channel_number));
2386	lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
2387
2388	lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
2389			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2390	lan743x_csr_write(adapter, INT_EN_CLR,
2391			  INT_BIT_DMA_RX_(rx->channel_number));
2392	napi_disable(&rx->napi);
2393
2394	netif_napi_del(&rx->napi);
2395
2396	lan743x_rx_ring_cleanup(rx);
2397}
2398
2399static int lan743x_rx_open(struct lan743x_rx *rx)
2400{
2401	struct lan743x_adapter *adapter = rx->adapter;
2402	u32 data = 0;
2403	int ret;
2404
2405	rx->frame_count = 0;
2406	ret = lan743x_rx_ring_init(rx);
2407	if (ret)
2408		goto return_error;
2409
2410	netif_napi_add(adapter->netdev,
2411		       &rx->napi, lan743x_rx_napi_poll,
2412		       rx->ring_size - 1);
2413
2414	lan743x_csr_write(adapter, DMAC_CMD,
2415			  DMAC_CMD_RX_SWR_(rx->channel_number));
2416	lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2417				 DMAC_CMD_RX_SWR_(rx->channel_number),
2418				 0, 1000, 20000, 100);
2419
2420	/* set ring base address */
2421	lan743x_csr_write(adapter,
2422			  RX_BASE_ADDRH(rx->channel_number),
2423			  DMA_ADDR_HIGH32(rx->ring_dma_ptr));
2424	lan743x_csr_write(adapter,
2425			  RX_BASE_ADDRL(rx->channel_number),
2426			  DMA_ADDR_LOW32(rx->ring_dma_ptr));
2427
2428	/* set rx write back address */
2429	lan743x_csr_write(adapter,
2430			  RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
2431			  DMA_ADDR_HIGH32(rx->head_dma_ptr));
2432	lan743x_csr_write(adapter,
2433			  RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
2434			  DMA_ADDR_LOW32(rx->head_dma_ptr));
2435	data = RX_CFG_A_RX_HP_WB_EN_;
2436	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2437		data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
2438			RX_CFG_A_RX_WB_THRES_SET_(0x7) |
2439			RX_CFG_A_RX_PF_THRES_SET_(16) |
2440			RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
2441	}
2442
2443	/* set RX_CFG_A */
2444	lan743x_csr_write(adapter,
2445			  RX_CFG_A(rx->channel_number), data);
2446
2447	/* set RX_CFG_B */
2448	data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
2449	data &= ~RX_CFG_B_RX_PAD_MASK_;
2450	if (!RX_HEAD_PADDING)
2451		data |= RX_CFG_B_RX_PAD_0_;
2452	else
2453		data |= RX_CFG_B_RX_PAD_2_;
2454	data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
2455	data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
2456	data |= RX_CFG_B_TS_ALL_RX_;
2457	if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2458		data |= RX_CFG_B_RDMABL_512_;
2459
2460	lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
2461	rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2462							 INT_BIT_DMA_RX_
2463							 (rx->channel_number));
2464
2465	/* set RX_CFG_C */
2466	data = 0;
2467	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2468		data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
2469	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2470		data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
2471	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2472		data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
2473	if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2474		data |= RX_CFG_C_RX_INT_EN_R2C_;
2475	lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
2476
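	/* Point the tail at the last initialized descriptor; RX_HEAD is
	 * expected to read back as zero on a freshly reset channel, which
	 * is verified below.
	 */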
2477	rx->last_tail = ((u32)(rx->ring_size - 1));
2478	lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2479			  rx->last_tail);
2480	rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
2481	if (rx->last_head) {
2482		ret = -EIO;
2483		goto napi_delete;
2484	}
2485
2486	napi_enable(&rx->napi);
2487
2488	lan743x_csr_write(adapter, INT_EN_SET,
2489			  INT_BIT_DMA_RX_(rx->channel_number));
2490	lan743x_csr_write(adapter, DMAC_INT_STS,
2491			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2492	lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2493			  DMAC_INT_BIT_RXFRM_(rx->channel_number));
2494	lan743x_csr_write(adapter, DMAC_CMD,
2495			  DMAC_CMD_START_R_(rx->channel_number));
2496
2497	/* initialize fifo */
2498	lan743x_csr_write(adapter, FCT_RX_CTL,
2499			  FCT_RX_CTL_RESET_(rx->channel_number));
2500	lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2501				 FCT_RX_CTL_RESET_(rx->channel_number),
2502				 0, 1000, 20000, 100);
2503	lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
2504			  FCT_FLOW_CTL_REQ_EN_ |
2505			  FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
2506			  FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
2507
2508	/* enable fifo */
2509	lan743x_csr_write(adapter, FCT_RX_CTL,
2510			  FCT_RX_CTL_EN_(rx->channel_number));
2511	return 0;
2512
2513napi_delete:
2514	netif_napi_del(&rx->napi);
2515	lan743x_rx_ring_cleanup(rx);
2516
2517return_error:
2518	return ret;
2519}
2520
2521static int lan743x_netdev_close(struct net_device *netdev)
2522{
2523	struct lan743x_adapter *adapter = netdev_priv(netdev);
2524	int index;
2525
2526	lan743x_tx_close(&adapter->tx[0]);
2527
2528	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
2529		lan743x_rx_close(&adapter->rx[index]);
2530
2531	lan743x_ptp_close(adapter);
2532
2533	lan743x_phy_close(adapter);
2534
2535	lan743x_mac_close(adapter);
2536
2537	lan743x_intr_close(adapter);
2538
2539	return 0;
2540}
2541
2542static int lan743x_netdev_open(struct net_device *netdev)
2543{
2544	struct lan743x_adapter *adapter = netdev_priv(netdev);
2545	int index;
2546	int ret;
2547
2548	ret = lan743x_intr_open(adapter);
2549	if (ret)
2550		goto return_error;
2551
2552	ret = lan743x_mac_open(adapter);
2553	if (ret)
2554		goto close_intr;
2555
2556	ret = lan743x_phy_open(adapter);
2557	if (ret)
2558		goto close_mac;
2559
2560	ret = lan743x_ptp_open(adapter);
2561	if (ret)
2562		goto close_phy;
2563
2564	lan743x_rfe_open(adapter);
2565
2566	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2567		ret = lan743x_rx_open(&adapter->rx[index]);
2568		if (ret)
2569			goto close_rx;
2570	}
2571
2572	ret = lan743x_tx_open(&adapter->tx[0]);
2573	if (ret)
2574		goto close_rx;
2575
2576	return 0;
2577
2578close_rx:
2579	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2580		if (adapter->rx[index].ring_cpu_ptr)
2581			lan743x_rx_close(&adapter->rx[index]);
2582	}
2583	lan743x_ptp_close(adapter);
2584
2585close_phy:
2586	lan743x_phy_close(adapter);
2587
2588close_mac:
2589	lan743x_mac_close(adapter);
2590
2591close_intr:
2592	lan743x_intr_close(adapter);
2593
2594return_error:
2595	netif_warn(adapter, ifup, adapter->netdev,
2596		   "Error opening LAN743x\n");
2597	return ret;
2598}
2599
2600static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
2601					     struct net_device *netdev)
2602{
2603	struct lan743x_adapter *adapter = netdev_priv(netdev);
2604
2605	return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
2606}
2607
2608static int lan743x_netdev_ioctl(struct net_device *netdev,
2609				struct ifreq *ifr, int cmd)
2610{
2611	if (!netif_running(netdev))
2612		return -EINVAL;
2613	if (cmd == SIOCSHWTSTAMP)
2614		return lan743x_ptp_ioctl(netdev, ifr, cmd);
2615	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
2616}
2617
2618static void lan743x_netdev_set_multicast(struct net_device *netdev)
2619{
2620	struct lan743x_adapter *adapter = netdev_priv(netdev);
2621
2622	lan743x_rfe_set_multicast(adapter);
2623}
2624
2625static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
2626{
2627	struct lan743x_adapter *adapter = netdev_priv(netdev);
2628	int ret = 0;
2629
2630	ret = lan743x_mac_set_mtu(adapter, new_mtu);
2631	if (!ret)
2632		netdev->mtu = new_mtu;
2633	return ret;
2634}
2635
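/* Statistics are read directly from the hardware counter registers on each
 * call; the driver does not accumulate them into 64-bit totals.
 */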
2636static void lan743x_netdev_get_stats64(struct net_device *netdev,
2637				       struct rtnl_link_stats64 *stats)
2638{
2639	struct lan743x_adapter *adapter = netdev_priv(netdev);
2640
2641	stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
2642	stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
2643	stats->rx_bytes = lan743x_csr_read(adapter,
2644					   STAT_RX_UNICAST_BYTE_COUNT) +
2645			  lan743x_csr_read(adapter,
2646					   STAT_RX_BROADCAST_BYTE_COUNT) +
2647			  lan743x_csr_read(adapter,
2648					   STAT_RX_MULTICAST_BYTE_COUNT);
2649	stats->tx_bytes = lan743x_csr_read(adapter,
2650					   STAT_TX_UNICAST_BYTE_COUNT) +
2651			  lan743x_csr_read(adapter,
2652					   STAT_TX_BROADCAST_BYTE_COUNT) +
2653			  lan743x_csr_read(adapter,
2654					   STAT_TX_MULTICAST_BYTE_COUNT);
2655	stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
2656			   lan743x_csr_read(adapter,
2657					    STAT_RX_ALIGNMENT_ERRORS) +
2658			   lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
2659			   lan743x_csr_read(adapter,
2660					    STAT_RX_UNDERSIZE_FRAME_ERRORS) +
2661			   lan743x_csr_read(adapter,
2662					    STAT_RX_OVERSIZE_FRAME_ERRORS);
2663	stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
2664			   lan743x_csr_read(adapter,
2665					    STAT_TX_EXCESS_DEFERRAL_ERRORS) +
2666			   lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
2667	stats->rx_dropped = lan743x_csr_read(adapter,
2668					     STAT_RX_DROPPED_FRAMES);
2669	stats->tx_dropped = lan743x_csr_read(adapter,
2670					     STAT_TX_EXCESSIVE_COLLISION);
2671	stats->multicast = lan743x_csr_read(adapter,
2672					    STAT_RX_MULTICAST_FRAMES) +
2673			   lan743x_csr_read(adapter,
2674					    STAT_TX_MULTICAST_FRAMES);
2675	stats->collisions = lan743x_csr_read(adapter,
2676					     STAT_TX_SINGLE_COLLISIONS) +
2677			    lan743x_csr_read(adapter,
2678					     STAT_TX_MULTIPLE_COLLISIONS) +
2679			    lan743x_csr_read(adapter,
2680					     STAT_TX_LATE_COLLISIONS);
2681}
2682
2683static int lan743x_netdev_set_mac_address(struct net_device *netdev,
2684					  void *addr)
2685{
2686	struct lan743x_adapter *adapter = netdev_priv(netdev);
2687	struct sockaddr *sock_addr = addr;
2688	int ret;
2689
2690	ret = eth_prepare_mac_addr_change(netdev, sock_addr);
2691	if (ret)
2692		return ret;
2693	ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
2694	lan743x_mac_set_address(adapter, sock_addr->sa_data);
2695	lan743x_rfe_update_mac_address(adapter);
2696	return 0;
2697}
2698
2699static const struct net_device_ops lan743x_netdev_ops = {
2700	.ndo_open		= lan743x_netdev_open,
2701	.ndo_stop		= lan743x_netdev_close,
2702	.ndo_start_xmit		= lan743x_netdev_xmit_frame,
2703	.ndo_do_ioctl		= lan743x_netdev_ioctl,
2704	.ndo_set_rx_mode	= lan743x_netdev_set_multicast,
2705	.ndo_change_mtu		= lan743x_netdev_change_mtu,
2706	.ndo_get_stats64	= lan743x_netdev_get_stats64,
2707	.ndo_set_mac_address	= lan743x_netdev_set_mac_address,
2708};
2709
2710static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
2711{
2712	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2713}
2714
2715static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
2716{
2717	mdiobus_unregister(adapter->mdiobus);
2718}
2719
2720static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
2721{
2722	unregister_netdev(adapter->netdev);
2723
2724	lan743x_mdiobus_cleanup(adapter);
2725	lan743x_hardware_cleanup(adapter);
2726	lan743x_pci_cleanup(adapter);
2727}
2728
2729static int lan743x_hardware_init(struct lan743x_adapter *adapter,
2730				 struct pci_dev *pdev)
2731{
2732	struct lan743x_tx *tx;
2733	int index;
2734	int ret;
2735
2736	adapter->intr.irq = adapter->pdev->irq;
2737	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2738	mutex_init(&adapter->dp_lock);
2739
2740	ret = lan743x_gpio_init(adapter);
2741	if (ret)
2742		return ret;
2743
2744	ret = lan743x_mac_init(adapter);
2745	if (ret)
2746		return ret;
2747
2748	ret = lan743x_phy_init(adapter);
2749	if (ret)
2750		return ret;
2751
2752	ret = lan743x_ptp_init(adapter);
2753	if (ret)
2754		return ret;
2755
2756	lan743x_rfe_update_mac_address(adapter);
2757
2758	ret = lan743x_dmac_init(adapter);
2759	if (ret)
2760		return ret;
2761
2762	for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2763		adapter->rx[index].adapter = adapter;
2764		adapter->rx[index].channel_number = index;
2765	}
2766
2767	tx = &adapter->tx[0];
2768	tx->adapter = adapter;
2769	tx->channel_number = 0;
2770	spin_lock_init(&tx->ring_lock);
2771	return 0;
2772}
2773
2774static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
2775{
2776	int ret;
2777
2778	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
2779	if (!(adapter->mdiobus)) {
2780		ret = -ENOMEM;
2781		goto return_error;
2782	}
2783
2784	adapter->mdiobus->priv = (void *)adapter;
2785	adapter->mdiobus->read = lan743x_mdiobus_read;
2786	adapter->mdiobus->write = lan743x_mdiobus_write;
2787	adapter->mdiobus->name = "lan743x-mdiobus";
2788	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
2789		 "pci-%s", pci_name(adapter->pdev));
2790
2791	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
2792		/* LAN7430 uses internal phy at address 1 */
2793		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
2794
2795	/* register mdiobus */
2796	ret = mdiobus_register(adapter->mdiobus);
2797	if (ret < 0)
2798		goto return_error;
2799	return 0;
2800
2801return_error:
2802	return ret;
2803}
2804
2805/** lan743x_pcidev_probe - Device Initialization Routine
2806 * @pdev: PCI device information struct
2807 * @id: entry in lan743x_pci_tbl
2808 *
2809 * Returns 0 on success, negative on failure
2810 *
2811 * Initializes an adapter identified by a pci_dev structure.
2812 * The OS initialization, configuring of the adapter private structure,
2813 * and a hardware reset occur.
2814 **/
2815static int lan743x_pcidev_probe(struct pci_dev *pdev,
2816				const struct pci_device_id *id)
2817{
2818	struct lan743x_adapter *adapter = NULL;
2819	struct net_device *netdev = NULL;
2820	const void *mac_addr;
2821	int ret = -ENODEV;
2822
2823	netdev = devm_alloc_etherdev(&pdev->dev,
2824				     sizeof(struct lan743x_adapter));
2825	if (!netdev)
2826		goto return_error;
2827
2828	SET_NETDEV_DEV(netdev, &pdev->dev);
2829	pci_set_drvdata(pdev, netdev);
2830	adapter = netdev_priv(netdev);
2831	adapter->netdev = netdev;
2832	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2833			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
2834			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
2835	netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
2836
2837	mac_addr = of_get_mac_address(pdev->dev.of_node);
2838	if (!IS_ERR(mac_addr))
2839		ether_addr_copy(adapter->mac_address, mac_addr);
2840
2841	ret = lan743x_pci_init(adapter, pdev);
2842	if (ret)
2843		goto return_error;
2844
2845	ret = lan743x_csr_init(adapter);
2846	if (ret)
2847		goto cleanup_pci;
2848
2849	ret = lan743x_hardware_init(adapter, pdev);
2850	if (ret)
2851		goto cleanup_pci;
2852
2853	ret = lan743x_mdiobus_init(adapter);
2854	if (ret)
2855		goto cleanup_hardware;
2856
2857	adapter->netdev->netdev_ops = &lan743x_netdev_ops;
2858	adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
2859	adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2860	adapter->netdev->hw_features = adapter->netdev->features;
2861
2862	/* carrier off reporting is important to ethtool even BEFORE open */
2863	netif_carrier_off(netdev);
2864
2865	ret = register_netdev(adapter->netdev);
2866	if (ret < 0)
2867		goto cleanup_mdiobus;
2868	return 0;
2869
2870cleanup_mdiobus:
2871	lan743x_mdiobus_cleanup(adapter);
2872
2873cleanup_hardware:
2874	lan743x_hardware_cleanup(adapter);
2875
2876cleanup_pci:
2877	lan743x_pci_cleanup(adapter);
2878
2879return_error:
2880	pr_warn("Initialization failed\n");
2881	return ret;
2882}
2883
2884/**
2885 * lan743x_pcidev_remove - Device Removal Routine
2886 * @pdev: PCI device information struct
2887 *
2888 * this is called by the PCI subsystem to alert the driver
2889 * that it should release a PCI device.  This could be caused by a
2890 * Hot-Plug event, or because the driver is going to be removed from
2891 * memory.
2892 **/
2893static void lan743x_pcidev_remove(struct pci_dev *pdev)
2894{
2895	struct net_device *netdev = pci_get_drvdata(pdev);
2896	struct lan743x_adapter *adapter = netdev_priv(netdev);
2897
2898	lan743x_full_cleanup(adapter);
2899}
2900
2901static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
2902{
2903	struct net_device *netdev = pci_get_drvdata(pdev);
2904	struct lan743x_adapter *adapter = netdev_priv(netdev);
2905
2906	rtnl_lock();
2907	netif_device_detach(netdev);
2908
2909	/* close the netdev only when it is in the running state.
2910	 * For instance, this is true when the system goes to sleep via
2911	 * pm-suspend, but false when it goes to sleep from the suspend GUI menu.
2912	 */
2913	if (netif_running(netdev))
2914		lan743x_netdev_close(netdev);
2915	rtnl_unlock();
2916
2917#ifdef CONFIG_PM
2918	pci_save_state(pdev);
2919#endif
2920
2921	/* clean up lan743x portion */
2922	lan743x_hardware_cleanup(adapter);
2923}
2924
2925#ifdef CONFIG_PM_SLEEP
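/* The wakeup-frame filters match on a CRC-16 of the frame bytes selected by
 * the MAC_WUF_MASKx registers.  crc16() (seed 0xFFFF) computes the value and
 * bitrev16() flips the bit order into the form the MAC_WUF_CFG CRC16 field
 * appears to expect.
 */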
2926static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
2927{
2928	return bitrev16(crc16(0xFFFF, buf, len));
2929}
2930
2931static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
2932{
2933	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
2934	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
2935	const u8 arp_type[2] = { 0x08, 0x06 };
2936	int mask_index;
2937	u32 pmtctl;
2938	u32 wucsr;
2939	u32 macrx;
2940	u16 crc;
2941
2942	for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++)
2943		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0);
2944
2945	/* clear wake settings */
2946	pmtctl = lan743x_csr_read(adapter, PMT_CTL);
2947	pmtctl |= PMT_CTL_WUPS_MASK_;
2948	pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
2949		PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
2950		PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
2951
2952	macrx = lan743x_csr_read(adapter, MAC_RX);
2953
2954	wucsr = 0;
2955	mask_index = 0;
2956
2957	pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
2958
2959	if (adapter->wolopts & WAKE_PHY) {
2960		pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
2961		pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
2962	}
2963	if (adapter->wolopts & WAKE_MAGIC) {
2964		wucsr |= MAC_WUCSR_MPEN_;
2965		macrx |= MAC_RX_RXEN_;
2966		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2967	}
2968	if (adapter->wolopts & WAKE_UCAST) {
2969		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_;
2970		macrx |= MAC_RX_RXEN_;
2971		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2972		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
2973	}
2974	if (adapter->wolopts & WAKE_BCAST) {
2975		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_;
2976		macrx |= MAC_RX_RXEN_;
2977		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
2978		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
2979	}
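	/* The multicast and ARP wake options below use wakeup-frame filters:
	 * each MAC_WUF_MASKn bit appears to select one byte of the frame for
	 * the CRC-16 match, e.g. a mask of 7 covers bytes 0-2 (the multicast
	 * OUI prefix) and 0x3000 covers bytes 12-13 (the EtherType compared
	 * against ARP).
	 */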
2980	if (adapter->wolopts & WAKE_MCAST) {
2981		/* IPv4 multicast */
2982		crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3);
2983		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
2984				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
2985				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
2986				  (crc & MAC_WUF_CFG_CRC16_MASK_));
2987		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7);
2988		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
2989		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
2990		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
2991		mask_index++;
2992
2993		/* IPv6 multicast */
2994		crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2);
2995		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
2996				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ |
2997				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
2998				  (crc & MAC_WUF_CFG_CRC16_MASK_));
2999		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3);
3000		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3001		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3002		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3003		mask_index++;
3004
3005		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3006		macrx |= MAC_RX_RXEN_;
3007		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3008		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3009	}
3010	if (adapter->wolopts & WAKE_ARP) {
3011		/* set MAC_WUF_CFG & WUF_MASK
3012		 * for packet type (offset 12,13) = ARP (0x0806)
3013		 */
3014		crc = lan743x_pm_wakeframe_crc16(arp_type, 2);
3015		lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index),
3016				  MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ |
3017				  (0 << MAC_WUF_CFG_OFFSET_SHIFT_) |
3018				  (crc & MAC_WUF_CFG_CRC16_MASK_));
3019		lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000);
3020		lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0);
3021		lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0);
3022		lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0);
3023		mask_index++;
3024
3025		wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_;
3026		macrx |= MAC_RX_RXEN_;
3027		pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_;
3028		pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_;
3029	}
3030
3031	lan743x_csr_write(adapter, MAC_WUCSR, wucsr);
3032	lan743x_csr_write(adapter, PMT_CTL, pmtctl);
3033	lan743x_csr_write(adapter, MAC_RX, macrx);
3034}
3035
3036static int lan743x_pm_suspend(struct device *dev)
3037{
3038	struct pci_dev *pdev = to_pci_dev(dev);
3039	struct net_device *netdev = pci_get_drvdata(pdev);
3040	struct lan743x_adapter *adapter = netdev_priv(netdev);
3041	int ret;
3042
3043	lan743x_pcidev_shutdown(pdev);
3044
3045	/* clear all wakes */
3046	lan743x_csr_write(adapter, MAC_WUCSR, 0);
3047	lan743x_csr_write(adapter, MAC_WUCSR2, 0);
3048	lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
3049
3050	if (adapter->wolopts)
3051		lan743x_pm_set_wol(adapter);
3052
3053	/* Host sets PME_En, puts the device into D3hot */
3054	ret = pci_prepare_to_sleep(pdev);
3055
3056	return 0;
3057}
3058
3059static int lan743x_pm_resume(struct device *dev)
3060{
3061	struct pci_dev *pdev = to_pci_dev(dev);
3062	struct net_device *netdev = pci_get_drvdata(pdev);
3063	struct lan743x_adapter *adapter = netdev_priv(netdev);
3064	int ret;
3065
3066	pci_set_power_state(pdev, PCI_D0);
3067	pci_restore_state(pdev);
3068	pci_save_state(pdev);
3069
3070	ret = lan743x_hardware_init(adapter, pdev);
3071	if (ret) {
3072		netif_err(adapter, probe, adapter->netdev,
3073			  "lan743x_hardware_init returned %d\n", ret);
3074	}
3075
3076	/* open the netdev only when it is in the running state during resume.
3077	 * For instance, this is true when the system wakes up after pm-suspend,
3078	 * but false when it wakes up after a suspend from the GUI menu.
3079	 */
3080	if (netif_running(netdev))
3081		lan743x_netdev_open(netdev);
3082
3083	netif_device_attach(netdev);
3084
3085	return 0;
3086}
3087
3088static const struct dev_pm_ops lan743x_pm_ops = {
3089	SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3090};
3091#endif /* CONFIG_PM_SLEEP */
3092
3093static const struct pci_device_id lan743x_pcidev_tbl[] = {
3094	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
3095	{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
3096	{ 0, }
3097};
3098
3099MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
3100
3101static struct pci_driver lan743x_pcidev_driver = {
3102	.name     = DRIVER_NAME,
3103	.id_table = lan743x_pcidev_tbl,
3104	.probe    = lan743x_pcidev_probe,
3105	.remove   = lan743x_pcidev_remove,
3106#ifdef CONFIG_PM_SLEEP
3107	.driver.pm = &lan743x_pm_ops,
3108#endif
3109	.shutdown = lan743x_pcidev_shutdown,
3110};
3111
3112module_pci_driver(lan743x_pcidev_driver);
3113
3114MODULE_AUTHOR(DRIVER_AUTHOR);
3115MODULE_DESCRIPTION(DRIVER_DESC);
3116MODULE_LICENSE("GPL");