   1/*
   2 * Linux driver for VMware's vmxnet3 ethernet NIC.
   3 *
   4 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License as published by the
   8 * Free Software Foundation; version 2 of the License and no later version.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13 * NON INFRINGEMENT. See the GNU General Public License for more
  14 * details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 *
  20 * The full GNU General Public License is included in this distribution in
  21 * the file called "COPYING".
  22 *
  23 * Maintained by: pv-drivers@vmware.com
  24 *
  25 */
  26
  27#include <linux/module.h>
  28#include <net/ip6_checksum.h>
  29
  30#include "vmxnet3_int.h"
  31#include "vmxnet3_xdp.h"
  32
  33char vmxnet3_driver_name[] = "vmxnet3";
  34#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
  35
  36/*
  37 * PCI Device ID Table
  38 * Last entry must be all 0s
  39 */
  40static const struct pci_device_id vmxnet3_pciid_table[] = {
  41	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
  42	{0}
  43};
  44
  45MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
  46
  47static int enable_mq = 1;
  48
  49static void
  50vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
  51
  52/*
  53 *    Enable/Disable the given intr
  54 */
  55static void
  56vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  57{
  58	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
  59}
  60
  61
  62static void
  63vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  64{
  65	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
  66}
  67
  68
  69/*
  70 *    Enable/Disable all intrs used by the device
  71 */
  72static void
  73vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
  74{
  75	int i;
  76
  77	for (i = 0; i < adapter->intr.num_intrs; i++)
  78		vmxnet3_enable_intr(adapter, i);
  79	if (!VMXNET3_VERSION_GE_6(adapter) ||
  80	    !adapter->queuesExtEnabled) {
  81		adapter->shared->devRead.intrConf.intrCtrl &=
  82					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
  83	} else {
  84		adapter->shared->devReadExt.intrConfExt.intrCtrl &=
  85					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
  86	}
  87}
  88
  89
  90static void
  91vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
  92{
  93	int i;
  94
  95	if (!VMXNET3_VERSION_GE_6(adapter) ||
  96	    !adapter->queuesExtEnabled) {
  97		adapter->shared->devRead.intrConf.intrCtrl |=
  98					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
  99	} else {
 100		adapter->shared->devReadExt.intrConfExt.intrCtrl |=
 101					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
 102	}
 103	for (i = 0; i < adapter->intr.num_intrs; i++)
 104		vmxnet3_disable_intr(adapter, i);
 105}
 106
 107
 108static void
 109vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
 110{
 111	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
 112}
 113
 114
 115static bool
 116vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 117{
 118	return tq->stopped;
 119}
 120
 121
 122static void
 123vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 124{
 125	tq->stopped = false;
 126	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
 127}
 128
 129
 130static void
 131vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 132{
 133	tq->stopped = false;
 134	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 135}
 136
 137
 138static void
 139vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 140{
 141	tq->stopped = true;
 142	tq->num_stop++;
 143	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 144}
 145
  146/* Check whether the given capability is supported by the UPT device,
  147 * or whether UPT was even requested.
  148 */
 149bool
 150vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
 151{
 152	if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
 153	    cap_supported & (1UL << cap)) {
 154		return true;
 155	}
 156
 157	return false;
 158}
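/* Illustrative call (hypothetical, not taken from a caller in this file):
 *   vmxnet3_check_ptcapability(adapter->dev_caps[0], VMXNET3_CAP_OOORX_COMP)
 * returns true either when that capability bit is set in the supported mask
 * or when the device has raised VMXNET3_DCR_ERROR.
 */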
 159
 160
 161/*
 162 * Check the link state. This may start or stop the tx queue.
 163 */
 164static void
 165vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 166{
 167	u32 ret;
 168	int i;
 169	unsigned long flags;
 170
 171	spin_lock_irqsave(&adapter->cmd_lock, flags);
 172	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 173	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
 174	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 175
 176	adapter->link_speed = ret >> 16;
 177	if (ret & 1) { /* Link is up. */
 178		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
 179			    adapter->link_speed);
 180		netif_carrier_on(adapter->netdev);
 181
 182		if (affectTxQueue) {
 183			for (i = 0; i < adapter->num_tx_queues; i++)
 184				vmxnet3_tq_start(&adapter->tx_queue[i],
 185						 adapter);
 186		}
 187	} else {
 188		netdev_info(adapter->netdev, "NIC Link is Down\n");
 189		netif_carrier_off(adapter->netdev);
 190
 191		if (affectTxQueue) {
 192			for (i = 0; i < adapter->num_tx_queues; i++)
 193				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
 194		}
 195	}
 196}
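/* Note (inferred from the code above, not from the device documentation):
 * the value read back from VMXNET3_REG_CMD after VMXNET3_CMD_GET_LINK packs
 * the link state into bit 0 (1 == up) and the link speed in Mbps into the
 * upper 16 bits, hence the "ret & 1" and "ret >> 16" above.
 */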
 197
 198static void
 199vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 200{
 201	int i;
 202	unsigned long flags;
 203	u32 events = le32_to_cpu(adapter->shared->ecr);
 204	if (!events)
 205		return;
 206
 207	vmxnet3_ack_events(adapter, events);
 208
 209	/* Check if link state has changed */
 210	if (events & VMXNET3_ECR_LINK)
 211		vmxnet3_check_link(adapter, true);
 212
 213	/* Check if there is an error on xmit/recv queues */
 214	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
 215		spin_lock_irqsave(&adapter->cmd_lock, flags);
 216		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 217				       VMXNET3_CMD_GET_QUEUE_STATUS);
 218		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 219
 220		for (i = 0; i < adapter->num_tx_queues; i++)
 221			if (adapter->tqd_start[i].status.stopped)
 222				dev_err(&adapter->netdev->dev,
 223					"%s: tq[%d] error 0x%x\n",
 224					adapter->netdev->name, i, le32_to_cpu(
 225					adapter->tqd_start[i].status.error));
 226		for (i = 0; i < adapter->num_rx_queues; i++)
 227			if (adapter->rqd_start[i].status.stopped)
 228				dev_err(&adapter->netdev->dev,
 229					"%s: rq[%d] error 0x%x\n",
 230					adapter->netdev->name, i,
 231					adapter->rqd_start[i].status.error);
 232
 233		schedule_work(&adapter->work);
 234	}
 235}
 236
 237#ifdef __BIG_ENDIAN_BITFIELD
  238/*
  239 * The device expects the bitfields in shared structures to be written in
  240 * little endian. When the CPU is big endian, the following routines are
  241 * used to read from and write to the device ABI correctly. The general
  242 * technique used here is: double-word bitfields are defined in the
  243 * opposite order for big-endian architectures. Before the driver reads
  244 * them, the whole double word is translated using le32_to_cpu; similarly,
  245 * after the driver writes into the bitfields, cpu_to_le32 is used to
  246 * translate the double words into the required format. To avoid touching
  247 * bits in the shared structures more than once, temporary descriptors are
  248 * used. These are passed as srcDesc to the following functions.
  249 */
 250static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
 251				struct Vmxnet3_RxDesc *dstDesc)
 252{
 253	u32 *src = (u32 *)srcDesc + 2;
 254	u32 *dst = (u32 *)dstDesc + 2;
 255	dstDesc->addr = le64_to_cpu(srcDesc->addr);
 256	*dst = le32_to_cpu(*src);
 257	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
 258}
 259
 260static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
 261			       struct Vmxnet3_TxDesc *dstDesc)
 262{
 263	int i;
 264	u32 *src = (u32 *)(srcDesc + 1);
 265	u32 *dst = (u32 *)(dstDesc + 1);
 266
 267	/* Working backwards so that the gen bit is set at the end. */
 268	for (i = 2; i > 0; i--) {
 269		src--;
 270		dst--;
 271		*dst = cpu_to_le32(*src);
 272	}
 273}
 274
 275
 276static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
 277				struct Vmxnet3_RxCompDesc *dstDesc)
 278{
 279	int i = 0;
 280	u32 *src = (u32 *)srcDesc;
 281	u32 *dst = (u32 *)dstDesc;
 282	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
 283		*dst = le32_to_cpu(*src);
 284		src++;
 285		dst++;
 286	}
 287}
 288
 289
 290/* Used to read bitfield values from double words. */
 291static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
 292{
 293	u32 temp = le32_to_cpu(*bitfield);
 294	u32 mask = ((1 << size) - 1) << pos;
 295	temp &= mask;
 296	temp >>= pos;
 297	return temp;
 298}
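/* Example (illustrative): the accessor macros below use this helper on
 * big-endian builds, e.g. VMXNET3_TXDESC_GET_GEN(txdesc) expands to
 *   get_bitfield32((const __le32 *)(txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT,
 *                  VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
 * i.e. byte-swap the containing double word once, then mask and shift out
 * the requested field.
 */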
 299
 300
 301
 302#endif  /* __BIG_ENDIAN_BITFIELD */
 303
 304#ifdef __BIG_ENDIAN_BITFIELD
 305
 306#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
 307			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
 308			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
 309#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
 310			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
 311			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
 312#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
 313			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
 314			VMXNET3_TCD_GEN_SIZE)
 315#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
 316			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
 317#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
 318			(dstrcd) = (tmp); \
 319			vmxnet3_RxCompToCPU((rcd), (tmp)); \
 320		} while (0)
 321#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
 322			(dstrxd) = (tmp); \
 323			vmxnet3_RxDescToCPU((rxd), (tmp)); \
 324		} while (0)
 325
 326#else
 327
 328#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
 329#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
 330#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
 331#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
 332#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
 333#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
 334
 335#endif /* __BIG_ENDIAN_BITFIELD  */
 336
 337
 338static void
 339vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
 340		     struct pci_dev *pdev)
 341{
 342	u32 map_type = tbi->map_type;
 343
 344	if (map_type & VMXNET3_MAP_SINGLE)
 345		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
 346				 DMA_TO_DEVICE);
 347	else if (map_type & VMXNET3_MAP_PAGE)
 348		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
 349			       DMA_TO_DEVICE);
 350	else
 351		BUG_ON(map_type & ~VMXNET3_MAP_XDP);
 352
 353	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
 354}
 355
 356
 357static int
 358vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
 359		  struct pci_dev *pdev,	struct vmxnet3_adapter *adapter,
 360		  struct xdp_frame_bulk *bq)
 361{
 362	struct vmxnet3_tx_buf_info *tbi;
 363	int entries = 0;
 364	u32 map_type;
 365
 366	/* no out of order completion */
 367	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
 368	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
 369
 370	tbi = &tq->buf_info[eop_idx];
 371	BUG_ON(!tbi->skb);
  372	map_type = tbi->map_type;
 373	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
 374
 375	while (tq->tx_ring.next2comp != eop_idx) {
 376		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
 377				     pdev);
 378
 379		/* update next2comp w/o tx_lock. Since we are marking more,
 380		 * instead of less, tx ring entries avail, the worst case is
 381		 * that the tx routine incorrectly re-queues a pkt due to
 382		 * insufficient tx ring entries.
 383		 */
 384		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
 385		entries++;
 386	}
 387
 388	if (map_type & VMXNET3_MAP_XDP)
 389		xdp_return_frame_bulk(tbi->xdpf, bq);
 390	else
 391		dev_kfree_skb_any(tbi->skb);
 392
 393	/* xdpf and skb are in an anonymous union. */
 394	tbi->skb = NULL;
 395
 396	return entries;
 397}
 398
 399
 400static int
 401vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
 402			struct vmxnet3_adapter *adapter)
 403{
 404	union Vmxnet3_GenericDesc *gdesc;
 405	struct xdp_frame_bulk bq;
 406	int completed = 0;
 407
 408	xdp_frame_bulk_init(&bq);
 409	rcu_read_lock();
 410
 411	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
 412	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
 413		/* Prevent any &gdesc->tcd field from being (speculatively)
 414		 * read before (&gdesc->tcd)->gen is read.
 415		 */
 416		dma_rmb();
 417
 418		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
 419					       &gdesc->tcd), tq, adapter->pdev,
 420					       adapter, &bq);
 421
 422		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
 423		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
 424	}
 425	xdp_flush_frame_bulk(&bq);
 426	rcu_read_unlock();
 427
 428	if (completed) {
 429		spin_lock(&tq->tx_lock);
 430		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
 431			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
 432			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
 433			     netif_carrier_ok(adapter->netdev))) {
 434			vmxnet3_tq_wake(tq, adapter);
 435		}
 436		spin_unlock(&tq->tx_lock);
 437	}
 438	return completed;
 439}
 440
 441
 442static void
 443vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
 444		   struct vmxnet3_adapter *adapter)
 445{
 446	struct xdp_frame_bulk bq;
 447	u32 map_type;
 448	int i;
 449
 450	xdp_frame_bulk_init(&bq);
 451	rcu_read_lock();
 452
 453	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
 454		struct vmxnet3_tx_buf_info *tbi;
 455
 456		tbi = tq->buf_info + tq->tx_ring.next2comp;
 457		map_type = tbi->map_type;
 458
 459		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
 460		if (tbi->skb) {
 461			if (map_type & VMXNET3_MAP_XDP)
 462				xdp_return_frame_bulk(tbi->xdpf, &bq);
 463			else
 464				dev_kfree_skb_any(tbi->skb);
 465			tbi->skb = NULL;
 466		}
 467		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
 468	}
 469
 470	xdp_flush_frame_bulk(&bq);
 471	rcu_read_unlock();
 472
 473	/* sanity check, verify all buffers are indeed unmapped */
 474	for (i = 0; i < tq->tx_ring.size; i++)
 475		BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
 476
 477	tq->tx_ring.gen = VMXNET3_INIT_GEN;
 478	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
 479
 480	tq->comp_ring.gen = VMXNET3_INIT_GEN;
 481	tq->comp_ring.next2proc = 0;
 482}
 483
 484
 485static void
 486vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 487		   struct vmxnet3_adapter *adapter)
 488{
 489	if (tq->tx_ring.base) {
 490		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
 491				  sizeof(struct Vmxnet3_TxDesc),
 492				  tq->tx_ring.base, tq->tx_ring.basePA);
 493		tq->tx_ring.base = NULL;
 494	}
 495	if (tq->data_ring.base) {
 496		dma_free_coherent(&adapter->pdev->dev,
 497				  tq->data_ring.size * tq->txdata_desc_size,
 498				  tq->data_ring.base, tq->data_ring.basePA);
 499		tq->data_ring.base = NULL;
 500	}
 501	if (tq->comp_ring.base) {
 502		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
 503				  sizeof(struct Vmxnet3_TxCompDesc),
 504				  tq->comp_ring.base, tq->comp_ring.basePA);
 505		tq->comp_ring.base = NULL;
 506	}
 507	kfree(tq->buf_info);
  508	tq->buf_info = NULL;
 509}
 510
 511
 512/* Destroy all tx queues */
 513void
 514vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
 515{
 516	int i;
 517
 518	for (i = 0; i < adapter->num_tx_queues; i++)
 519		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
 520}
 521
 522
 523static void
 524vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
 525		struct vmxnet3_adapter *adapter)
 526{
 527	int i;
 528
 529	/* reset the tx ring contents to 0 and reset the tx ring states */
 530	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
 531	       sizeof(struct Vmxnet3_TxDesc));
 532	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
 533	tq->tx_ring.gen = VMXNET3_INIT_GEN;
 534
 535	memset(tq->data_ring.base, 0,
 536	       tq->data_ring.size * tq->txdata_desc_size);
 537
 538	/* reset the tx comp ring contents to 0 and reset comp ring states */
 539	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
 540	       sizeof(struct Vmxnet3_TxCompDesc));
 541	tq->comp_ring.next2proc = 0;
 542	tq->comp_ring.gen = VMXNET3_INIT_GEN;
 543
 544	/* reset the bookkeeping data */
 545	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
 546	for (i = 0; i < tq->tx_ring.size; i++)
 547		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
 548
 549	/* stats are not reset */
 550}
 551
 552
 553static int
 554vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
 555		  struct vmxnet3_adapter *adapter)
  556{
 557	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
 558	       tq->comp_ring.base || tq->buf_info);
 559
 560	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
 561			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
 562			&tq->tx_ring.basePA, GFP_KERNEL);
 563	if (!tq->tx_ring.base) {
 564		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
 565		goto err;
 566	}
 567
 568	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
 569			tq->data_ring.size * tq->txdata_desc_size,
 570			&tq->data_ring.basePA, GFP_KERNEL);
 571	if (!tq->data_ring.base) {
 572		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
 573		goto err;
 574	}
 575
 576	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
 577			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
 578			&tq->comp_ring.basePA, GFP_KERNEL);
 579	if (!tq->comp_ring.base) {
 580		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
 581		goto err;
 582	}
 583
 584	tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
 585				    GFP_KERNEL,
 586				    dev_to_node(&adapter->pdev->dev));
 587	if (!tq->buf_info)
 588		goto err;
 589
 590	return 0;
 591
 592err:
 593	vmxnet3_tq_destroy(tq, adapter);
 594	return -ENOMEM;
 595}
 596
 597static void
 598vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
 599{
 600	int i;
 601
 602	for (i = 0; i < adapter->num_tx_queues; i++)
 603		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
 604}
 605
 606/*
 607 *    starting from ring->next2fill, allocate rx buffers for the given ring
 608 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 609 *    are allocated or allocation fails
 610 */
 611
 612static int
 613vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 614			int num_to_alloc, struct vmxnet3_adapter *adapter)
 615{
 616	int num_allocated = 0;
 617	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
 618	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
 619	u32 val;
 620
 621	while (num_allocated <= num_to_alloc) {
 622		struct vmxnet3_rx_buf_info *rbi;
 623		union Vmxnet3_GenericDesc *gd;
 624
 625		rbi = rbi_base + ring->next2fill;
 626		gd = ring->base + ring->next2fill;
 627		rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
 628
 629		if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
 630			void *data = vmxnet3_pp_get_buff(rq->page_pool,
 631							 &rbi->dma_addr,
 632							 GFP_KERNEL);
 633			if (!data) {
 634				rq->stats.rx_buf_alloc_failure++;
 635				break;
 636			}
 637			rbi->page = virt_to_page(data);
 638			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
 639		} else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
 640			if (rbi->skb == NULL) {
 641				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
 642								       rbi->len,
 643								       GFP_KERNEL);
 644				if (unlikely(rbi->skb == NULL)) {
 645					rq->stats.rx_buf_alloc_failure++;
 646					break;
 647				}
 648
 649				rbi->dma_addr = dma_map_single(
 650						&adapter->pdev->dev,
 651						rbi->skb->data, rbi->len,
 652						DMA_FROM_DEVICE);
 653				if (dma_mapping_error(&adapter->pdev->dev,
 654						      rbi->dma_addr)) {
 655					dev_kfree_skb_any(rbi->skb);
 656					rbi->skb = NULL;
 657					rq->stats.rx_buf_alloc_failure++;
 658					break;
 659				}
 660			} else {
 661				/* rx buffer skipped by the device */
 662			}
 663			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
 664		} else {
 665			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
 666			       rbi->len  != PAGE_SIZE);
 667
 668			if (rbi->page == NULL) {
 669				rbi->page = alloc_page(GFP_ATOMIC);
 670				if (unlikely(rbi->page == NULL)) {
 671					rq->stats.rx_buf_alloc_failure++;
 672					break;
 673				}
 674				rbi->dma_addr = dma_map_page(
 675						&adapter->pdev->dev,
 676						rbi->page, 0, PAGE_SIZE,
 677						DMA_FROM_DEVICE);
 678				if (dma_mapping_error(&adapter->pdev->dev,
 679						      rbi->dma_addr)) {
 680					put_page(rbi->page);
 681					rbi->page = NULL;
 682					rq->stats.rx_buf_alloc_failure++;
 683					break;
 684				}
 685			} else {
 686				/* rx buffers skipped by the device */
 687			}
 688			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
 689		}
 690
 691		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
 692		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 693					   | val | rbi->len);
 694
  695		/* Fill the last buffer but don't mark it ready, or else the
  696		 * device will think that the queue is full */
 697		if (num_allocated == num_to_alloc) {
 698			rbi->comp_state = VMXNET3_RXD_COMP_DONE;
 699			break;
 700		}
 701
 702		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
 703		num_allocated++;
 704		vmxnet3_cmd_ring_adv_next2fill(ring);
 705	}
 706
 707	netdev_dbg(adapter->netdev,
 708		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
 709		num_allocated, ring->next2fill, ring->next2comp);
 710
 711	/* so that the device can distinguish a full ring and an empty ring */
 712	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
 713
 714	return num_allocated;
 715}
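/* Note on the loop above: the last prepared descriptor is deliberately left
 * without its gen bit set (only comp_state is marked DONE), so next2fill can
 * never catch up with next2comp; a ring where the two are equal therefore
 * always means "empty", never "full", which is what the BUG_ON verifies.
 */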
 716
 717
 718static void
 719vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 720		    struct vmxnet3_rx_buf_info *rbi)
 721{
 722	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
 723
 724	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 725
  726	skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
 727	skb->data_len += rcd->len;
 728	skb->truesize += PAGE_SIZE;
 729	skb_shinfo(skb)->nr_frags++;
 730}
 731
 732
 733static int
 734vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 735		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
 736		struct vmxnet3_adapter *adapter)
 737{
 738	u32 dw2, len;
 739	unsigned long buf_offset;
 740	int i;
 741	union Vmxnet3_GenericDesc *gdesc;
 742	struct vmxnet3_tx_buf_info *tbi = NULL;
 743
 744	BUG_ON(ctx->copy_size > skb_headlen(skb));
 745
 746	/* use the previous gen bit for the SOP desc */
 747	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
 748
 749	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
 750	gdesc = ctx->sop_txd; /* both loops below can be skipped */
 751
 752	/* no need to map the buffer if headers are copied */
 753	if (ctx->copy_size) {
 754		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
 755					tq->tx_ring.next2fill *
 756					tq->txdata_desc_size);
 757		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
 758		ctx->sop_txd->dword[3] = 0;
 759
 760		tbi = tq->buf_info + tq->tx_ring.next2fill;
 761		tbi->map_type = VMXNET3_MAP_NONE;
 762
 763		netdev_dbg(adapter->netdev,
 764			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 765			tq->tx_ring.next2fill,
 766			le64_to_cpu(ctx->sop_txd->txd.addr),
 767			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 768		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 769
 770		/* use the right gen for non-SOP desc */
 771		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 772	}
 773
 774	/* linear part can use multiple tx desc if it's big */
 775	len = skb_headlen(skb) - ctx->copy_size;
 776	buf_offset = ctx->copy_size;
 777	while (len) {
 778		u32 buf_size;
 779
 780		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
 781			buf_size = len;
 782			dw2 |= len;
 783		} else {
 784			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
 785			/* spec says that for TxDesc.len, 0 == 2^14 */
 786		}
 787
 788		tbi = tq->buf_info + tq->tx_ring.next2fill;
 789		tbi->map_type = VMXNET3_MAP_SINGLE;
 790		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 791				skb->data + buf_offset, buf_size,
 792				DMA_TO_DEVICE);
 793		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
 794			return -EFAULT;
 795
 796		tbi->len = buf_size;
 797
 798		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 799		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 800
 801		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
 802		gdesc->dword[2] = cpu_to_le32(dw2);
 803		gdesc->dword[3] = 0;
 804
 805		netdev_dbg(adapter->netdev,
 806			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 807			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
 808			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 809		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 810		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 811
 812		len -= buf_size;
 813		buf_offset += buf_size;
 814	}
 815
 816	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 817		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 818		u32 buf_size;
 819
 820		buf_offset = 0;
 821		len = skb_frag_size(frag);
 822		while (len) {
 823			tbi = tq->buf_info + tq->tx_ring.next2fill;
 824			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
 825				buf_size = len;
 826				dw2 |= len;
 827			} else {
 828				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
 829				/* spec says that for TxDesc.len, 0 == 2^14 */
 830			}
 831			tbi->map_type = VMXNET3_MAP_PAGE;
 832			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
 833							 buf_offset, buf_size,
 834							 DMA_TO_DEVICE);
 835			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
 836				return -EFAULT;
 837
 838			tbi->len = buf_size;
 839
 840			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 841			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 842
 843			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
 844			gdesc->dword[2] = cpu_to_le32(dw2);
 845			gdesc->dword[3] = 0;
 846
 847			netdev_dbg(adapter->netdev,
 848				"txd[%u]: 0x%llx %u %u\n",
 849				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
 850				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 851			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 852			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 853
 854			len -= buf_size;
 855			buf_offset += buf_size;
 856		}
 857	}
 858
 859	ctx->eop_txd = gdesc;
 860
 861	/* set the last buf_info for the pkt */
 862	tbi->skb = skb;
 863	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
 864
 865	return 0;
 866}
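/* Ordering note (derived from this function and vmxnet3_tq_xmit() below):
 * the SOP descriptor is written with the *previous* generation bit, so the
 * device ignores the packet while the remaining descriptors are filled in.
 * Only once everything is set up does vmxnet3_tq_xmit() flip the SOP gen bit
 * (behind a dma_wmb()) and hand the whole descriptor chain to the device.
 */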
 867
 868
 869/* Init all tx queues */
 870static void
 871vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
 872{
 873	int i;
 874
 875	for (i = 0; i < adapter->num_tx_queues; i++)
 876		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
 877}
 878
 879
 880/*
 881 *    parse relevant protocol headers:
 882 *      For a tso pkt, relevant headers are L2/3/4 including options
 883 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 884 *      if it's a TCP/UDP pkt
 885 *
 886 * Returns:
 887 *    -1:  error happens during parsing
 888 *     0:  protocol headers parsed, but too big to be copied
 889 *     1:  protocol headers parsed and copied
 890 *
 891 * Other effects:
 892 *    1. related *ctx fields are updated.
 893 *    2. ctx->copy_size is # of bytes copied
 894 *    3. the portion to be copied is guaranteed to be in the linear part
 895 *
 896 */
 897static int
 898vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 899		  struct vmxnet3_tx_ctx *ctx,
 900		  struct vmxnet3_adapter *adapter)
 901{
 902	u8 protocol = 0;
 903
 904	if (ctx->mss) {	/* TSO */
 905		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
 906			ctx->l4_offset = skb_inner_transport_offset(skb);
 907			ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
 908			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
 909		} else {
 910			ctx->l4_offset = skb_transport_offset(skb);
 911			ctx->l4_hdr_size = tcp_hdrlen(skb);
 912			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
 913		}
 914	} else {
 915		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 916			/* For encap packets, skb_checksum_start_offset refers
 917			 * to inner L4 offset. Thus, below works for encap as
 918			 * well as non-encap case
 919			 */
 920			ctx->l4_offset = skb_checksum_start_offset(skb);
 921
 922			if (VMXNET3_VERSION_GE_4(adapter) &&
 923			    skb->encapsulation) {
 924				struct iphdr *iph = inner_ip_hdr(skb);
 925
 926				if (iph->version == 4) {
 927					protocol = iph->protocol;
 928				} else {
 929					const struct ipv6hdr *ipv6h;
 930
 931					ipv6h = inner_ipv6_hdr(skb);
 932					protocol = ipv6h->nexthdr;
 933				}
 934			} else {
 935				if (ctx->ipv4) {
 936					const struct iphdr *iph = ip_hdr(skb);
 937
 938					protocol = iph->protocol;
 939				} else if (ctx->ipv6) {
 940					const struct ipv6hdr *ipv6h;
 941
 942					ipv6h = ipv6_hdr(skb);
 943					protocol = ipv6h->nexthdr;
 944				}
 945			}
 946
 947			switch (protocol) {
 948			case IPPROTO_TCP:
 949				ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
 950						   tcp_hdrlen(skb);
 951				break;
 952			case IPPROTO_UDP:
 953				ctx->l4_hdr_size = sizeof(struct udphdr);
 954				break;
 955			default:
 956				ctx->l4_hdr_size = 0;
 957				break;
 958			}
 959
 960			ctx->copy_size = min(ctx->l4_offset +
 961					 ctx->l4_hdr_size, skb->len);
 962		} else {
 963			ctx->l4_offset = 0;
 964			ctx->l4_hdr_size = 0;
 965			/* copy as much as allowed */
 966			ctx->copy_size = min_t(unsigned int,
 967					       tq->txdata_desc_size,
 968					       skb_headlen(skb));
 969		}
 970
 971		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
 972			ctx->copy_size = skb->len;
 973
 974		/* make sure headers are accessible directly */
 975		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
 976			goto err;
 977	}
 978
 979	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
 980		tq->stats.oversized_hdr++;
 981		ctx->copy_size = 0;
 982		return 0;
 983	}
 984
 985	return 1;
 986err:
 987	return -1;
 988}
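/* Worked example (hypothetical sizes): for a non-encapsulated TSO IPv4/TCP
 * packet with a 14-byte Ethernet header, a 20-byte IP header and a 32-byte
 * TCP header (options included), l4_offset = 34, l4_hdr_size = 32 and
 * copy_size = 66, i.e. the whole L2/L3/L4 header block is later copied into
 * the tx data ring by vmxnet3_copy_hdr().
 */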
 989
 990/*
 991 *    copy relevant protocol headers to the transmit ring:
 992 *      For a tso pkt, relevant headers are L2/3/4 including options
 993 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 994 *      if it's a TCP/UDP pkt
 995 *
 996 *
  997 *    Note that this requires that vmxnet3_parse_hdr be called first to set
  998 *      the appropriate bits in ctx
 999 */
1000static void
1001vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1002		 struct vmxnet3_tx_ctx *ctx,
1003		 struct vmxnet3_adapter *adapter)
1004{
1005	struct Vmxnet3_TxDataDesc *tdd;
1006
1007	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
1008					    tq->tx_ring.next2fill *
1009					    tq->txdata_desc_size);
1010
1011	memcpy(tdd->data, skb->data, ctx->copy_size);
1012	netdev_dbg(adapter->netdev,
1013		"copy %u bytes to dataRing[%u]\n",
1014		ctx->copy_size, tq->tx_ring.next2fill);
1015}
1016
1017
1018static void
1019vmxnet3_prepare_inner_tso(struct sk_buff *skb,
1020			  struct vmxnet3_tx_ctx *ctx)
1021{
1022	struct tcphdr *tcph = inner_tcp_hdr(skb);
1023	struct iphdr *iph = inner_ip_hdr(skb);
1024
1025	if (iph->version == 4) {
1026		iph->check = 0;
1027		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1028						 IPPROTO_TCP, 0);
1029	} else {
1030		struct ipv6hdr *iph = inner_ipv6_hdr(skb);
1031
1032		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
1033					       IPPROTO_TCP, 0);
1034	}
1035}
1036
1037static void
1038vmxnet3_prepare_tso(struct sk_buff *skb,
1039		    struct vmxnet3_tx_ctx *ctx)
1040{
1041	struct tcphdr *tcph = tcp_hdr(skb);
1042
1043	if (ctx->ipv4) {
1044		struct iphdr *iph = ip_hdr(skb);
1045
1046		iph->check = 0;
1047		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1048						 IPPROTO_TCP, 0);
1049	} else if (ctx->ipv6) {
 1050		tcp_v6_gso_csum_prep(skb);
1051	}
1052}
1053
1054static int txd_estimate(const struct sk_buff *skb)
1055{
1056	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1057	int i;
1058
1059	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1060		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1061
1062		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
1063	}
1064	return count;
1065}
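/* Rough example (assuming VMXNET3_TXD_NEEDED() is a ceiling division by the
 * 16 KiB VMXNET3_MAX_TX_BUF_SIZE, per the "0 == 2^14" note in
 * vmxnet3_map_pkt() above): a packet with a 2 KiB linear part and two
 * 32 KiB page fragments estimates (1 + 1) + 2 + 2 = 6 descriptors.
 */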
1066
1067/*
 1068 * Transmits a pkt through a given tq
 1069 * Returns:
 1070 *    NETDEV_TX_OK:      descriptors are set up successfully
1071 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
1072 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
1073 *
1074 * Side-effects:
1075 *    1. tx ring may be changed
1076 *    2. tq stats may be updated accordingly
1077 *    3. shared->txNumDeferred may be updated
1078 */
1079
1080static int
1081vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1082		struct vmxnet3_adapter *adapter, struct net_device *netdev)
1083{
1084	int ret;
1085	u32 count;
1086	int num_pkts;
1087	int tx_num_deferred;
1088	unsigned long flags;
1089	struct vmxnet3_tx_ctx ctx;
1090	union Vmxnet3_GenericDesc *gdesc;
1091#ifdef __BIG_ENDIAN_BITFIELD
1092	/* Use temporary descriptor to avoid touching bits multiple times */
1093	union Vmxnet3_GenericDesc tempTxDesc;
1094#endif
1095
1096	count = txd_estimate(skb);
1097
1098	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1099	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1100
1101	ctx.mss = skb_shinfo(skb)->gso_size;
1102	if (ctx.mss) {
1103		if (skb_header_cloned(skb)) {
1104			if (unlikely(pskb_expand_head(skb, 0, 0,
1105						      GFP_ATOMIC) != 0)) {
1106				tq->stats.drop_tso++;
1107				goto drop_pkt;
1108			}
1109			tq->stats.copy_skb_header++;
1110		}
1111		if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1112			/* tso pkts must not use more than
1113			 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
1114			 */
1115			if (skb_linearize(skb) != 0) {
1116				tq->stats.drop_too_many_frags++;
1117				goto drop_pkt;
1118			}
1119			tq->stats.linearized++;
1120
1121			/* recalculate the # of descriptors to use */
1122			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1123			if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1124				tq->stats.drop_too_many_frags++;
1125				goto drop_pkt;
1126			}
1127		}
1128		if (skb->encapsulation) {
1129			vmxnet3_prepare_inner_tso(skb, &ctx);
1130		} else {
1131			vmxnet3_prepare_tso(skb, &ctx);
1132		}
1133	} else {
1134		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1135
1136			/* non-tso pkts must not use more than
1137			 * VMXNET3_MAX_TXD_PER_PKT entries
1138			 */
1139			if (skb_linearize(skb) != 0) {
1140				tq->stats.drop_too_many_frags++;
1141				goto drop_pkt;
1142			}
1143			tq->stats.linearized++;
1144
1145			/* recalculate the # of descriptors to use */
1146			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1147		}
1148	}
1149
1150	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1151	if (ret >= 0) {
1152		BUG_ON(ret <= 0 && ctx.copy_size != 0);
1153		/* hdrs parsed, check against other limits */
1154		if (ctx.mss) {
1155			if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1156				     VMXNET3_MAX_TX_BUF_SIZE)) {
1157				tq->stats.drop_oversized_hdr++;
1158				goto drop_pkt;
1159			}
1160		} else {
1161			if (skb->ip_summed == CHECKSUM_PARTIAL) {
1162				if (unlikely(ctx.l4_offset +
1163					     skb->csum_offset >
1164					     VMXNET3_MAX_CSUM_OFFSET)) {
1165					tq->stats.drop_oversized_hdr++;
1166					goto drop_pkt;
1167				}
1168			}
1169		}
1170	} else {
1171		tq->stats.drop_hdr_inspect_err++;
1172		goto drop_pkt;
1173	}
1174
1175	spin_lock_irqsave(&tq->tx_lock, flags);
1176
1177	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1178		tq->stats.tx_ring_full++;
1179		netdev_dbg(adapter->netdev,
1180			"tx queue stopped on %s, next2comp %u"
1181			" next2fill %u\n", adapter->netdev->name,
1182			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1183
1184		vmxnet3_tq_stop(tq, adapter);
1185		spin_unlock_irqrestore(&tq->tx_lock, flags);
1186		return NETDEV_TX_BUSY;
1187	}
1188
1189
1190	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1191
1192	/* fill tx descs related to addr & len */
1193	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1194		goto unlock_drop_pkt;
1195
1196	/* setup the EOP desc */
1197	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1198
1199	/* setup the SOP desc */
1200#ifdef __BIG_ENDIAN_BITFIELD
1201	gdesc = &tempTxDesc;
1202	gdesc->dword[2] = ctx.sop_txd->dword[2];
1203	gdesc->dword[3] = ctx.sop_txd->dword[3];
1204#else
1205	gdesc = ctx.sop_txd;
1206#endif
1207	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1208	if (ctx.mss) {
1209		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1210			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1211			if (VMXNET3_VERSION_GE_7(adapter)) {
1212				gdesc->txd.om = VMXNET3_OM_TSO;
1213				gdesc->txd.ext1 = 1;
1214			} else {
1215				gdesc->txd.om = VMXNET3_OM_ENCAP;
1216			}
1217			gdesc->txd.msscof = ctx.mss;
1218
1219			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1220				gdesc->txd.oco = 1;
1221		} else {
1222			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1223			gdesc->txd.om = VMXNET3_OM_TSO;
1224			gdesc->txd.msscof = ctx.mss;
1225		}
1226		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1227	} else {
1228		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1229			if (VMXNET3_VERSION_GE_4(adapter) &&
1230			    skb->encapsulation) {
1231				gdesc->txd.hlen = ctx.l4_offset +
1232						  ctx.l4_hdr_size;
1233				if (VMXNET3_VERSION_GE_7(adapter)) {
1234					gdesc->txd.om = VMXNET3_OM_CSUM;
1235					gdesc->txd.msscof = ctx.l4_offset +
1236							    skb->csum_offset;
1237					gdesc->txd.ext1 = 1;
1238				} else {
1239					gdesc->txd.om = VMXNET3_OM_ENCAP;
1240					gdesc->txd.msscof = 0;		/* Reserved */
1241				}
1242			} else {
1243				gdesc->txd.hlen = ctx.l4_offset;
1244				gdesc->txd.om = VMXNET3_OM_CSUM;
1245				gdesc->txd.msscof = ctx.l4_offset +
1246						    skb->csum_offset;
1247			}
1248		} else {
1249			gdesc->txd.om = 0;
1250			gdesc->txd.msscof = 0;
1251		}
1252		num_pkts = 1;
1253	}
1254	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1255	tx_num_deferred += num_pkts;
1256
1257	if (skb_vlan_tag_present(skb)) {
1258		gdesc->txd.ti = 1;
1259		gdesc->txd.tci = skb_vlan_tag_get(skb);
1260	}
1261
1262	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
1263	 * all other writes to &gdesc->txd.
1264	 */
1265	dma_wmb();
1266
1267	/* finally flips the GEN bit of the SOP desc. */
1268	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1269						  VMXNET3_TXD_GEN);
1270#ifdef __BIG_ENDIAN_BITFIELD
1271	/* Finished updating in bitfields of Tx Desc, so write them in original
1272	 * place.
1273	 */
1274	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1275			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1276	gdesc = ctx.sop_txd;
1277#endif
1278	netdev_dbg(adapter->netdev,
1279		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1280		(u32)(ctx.sop_txd -
1281		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1282		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1283
1284	spin_unlock_irqrestore(&tq->tx_lock, flags);
1285
1286	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1287		tq->shared->txNumDeferred = 0;
1288		VMXNET3_WRITE_BAR0_REG(adapter,
1289				       adapter->tx_prod_offset + tq->qid * 8,
1290				       tq->tx_ring.next2fill);
1291	}
1292
1293	return NETDEV_TX_OK;
1294
1295unlock_drop_pkt:
1296	spin_unlock_irqrestore(&tq->tx_lock, flags);
1297drop_pkt:
1298	tq->stats.drop_total++;
1299	dev_kfree_skb_any(skb);
1300	return NETDEV_TX_OK;
1301}
1302
1303static int
1304vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
1305		  struct vmxnet3_rx_queue *rq, int size)
1306{
1307	bool xdp_prog = vmxnet3_xdp_enabled(adapter);
1308	const struct page_pool_params pp_params = {
1309		.order = 0,
1310		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1311		.pool_size = size,
1312		.nid = NUMA_NO_NODE,
1313		.dev = &adapter->pdev->dev,
1314		.offset = VMXNET3_XDP_RX_OFFSET,
1315		.max_len = VMXNET3_XDP_MAX_FRSIZE,
1316		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
1317	};
1318	struct page_pool *pp;
1319	int err;
1320
1321	pp = page_pool_create(&pp_params);
1322	if (IS_ERR(pp))
1323		return PTR_ERR(pp);
1324
1325	err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
1326			       rq->napi.napi_id);
1327	if (err < 0)
1328		goto err_free_pp;
1329
1330	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
1331	if (err)
1332		goto err_unregister_rxq;
1333
1334	rq->page_pool = pp;
1335
1336	return 0;
1337
1338err_unregister_rxq:
1339	xdp_rxq_info_unreg(&rq->xdp_rxq);
1340err_free_pp:
1341	page_pool_destroy(pp);
1342
1343	return err;
1344}
1345
1346void *
1347vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1348		    gfp_t gfp_mask)
1349{
1350	struct page *page;
1351
1352	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1353	if (unlikely(!page))
1354		return NULL;
1355
1356	*dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
1357
1358	return page_address(page);
1359}
1360
1361static netdev_tx_t
1362vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1363{
1364	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1365
1366	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1367	return vmxnet3_tq_xmit(skb,
1368			       &adapter->tx_queue[skb->queue_mapping],
1369			       adapter, netdev);
1370}
1371
1372
1373static void
1374vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1375		struct sk_buff *skb,
1376		union Vmxnet3_GenericDesc *gdesc)
1377{
1378	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1379		if (gdesc->rcd.v4 &&
1380		    (le32_to_cpu(gdesc->dword[3]) &
1381		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1382			skb->ip_summed = CHECKSUM_UNNECESSARY;
1383			if ((le32_to_cpu(gdesc->dword[0]) &
1384				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1385				skb->csum_level = 1;
1386			}
1387			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1388				     !(le32_to_cpu(gdesc->dword[0]) &
1389				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1390			WARN_ON_ONCE(gdesc->rcd.frg &&
1391				     !(le32_to_cpu(gdesc->dword[0]) &
1392				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1393		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1394					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
1395			skb->ip_summed = CHECKSUM_UNNECESSARY;
1396			if ((le32_to_cpu(gdesc->dword[0]) &
1397				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1398				skb->csum_level = 1;
1399			}
1400			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1401				     !(le32_to_cpu(gdesc->dword[0]) &
1402				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1403			WARN_ON_ONCE(gdesc->rcd.frg &&
1404				     !(le32_to_cpu(gdesc->dword[0]) &
1405				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1406		} else {
1407			if (gdesc->rcd.csum) {
1408				skb->csum = htons(gdesc->rcd.csum);
1409				skb->ip_summed = CHECKSUM_PARTIAL;
1410			} else {
1411				skb_checksum_none_assert(skb);
1412			}
1413		}
1414	} else {
1415		skb_checksum_none_assert(skb);
1416	}
1417}
1418
1419
1420static void
1421vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1422		 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
1423{
1424	rq->stats.drop_err++;
1425	if (!rcd->fcs)
1426		rq->stats.drop_fcs++;
1427
1428	rq->stats.drop_total++;
1429
1430	/*
1431	 * We do not unmap and chain the rx buffer to the skb.
1432	 * We basically pretend this buffer is not used and will be recycled
1433	 * by vmxnet3_rq_alloc_rx_buf()
1434	 */
1435
1436	/*
1437	 * ctx->skb may be NULL if this is the first and the only one
1438	 * desc for the pkt
1439	 */
1440	if (ctx->skb)
1441		dev_kfree_skb_irq(ctx->skb);
1442
1443	ctx->skb = NULL;
1444}
1445
1446
1447static u32
1448vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1449		    union Vmxnet3_GenericDesc *gdesc)
1450{
1451	u32 hlen, maplen;
1452	union {
1453		void *ptr;
1454		struct ethhdr *eth;
1455		struct vlan_ethhdr *veth;
1456		struct iphdr *ipv4;
1457		struct ipv6hdr *ipv6;
1458		struct tcphdr *tcp;
1459	} hdr;
1460	BUG_ON(gdesc->rcd.tcp == 0);
1461
1462	maplen = skb_headlen(skb);
1463	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1464		return 0;
1465
1466	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1467	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
1468		hlen = sizeof(struct vlan_ethhdr);
1469	else
1470		hlen = sizeof(struct ethhdr);
1471
1472	hdr.eth = eth_hdr(skb);
1473	if (gdesc->rcd.v4) {
1474		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1475		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1476		hdr.ptr += hlen;
1477		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1478		hlen = hdr.ipv4->ihl << 2;
1479		hdr.ptr += hdr.ipv4->ihl << 2;
1480	} else if (gdesc->rcd.v6) {
1481		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1482		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1483		hdr.ptr += hlen;
1484		/* Use an estimated value, since we also need to handle
1485		 * TSO case.
1486		 */
1487		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1488			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1489		hlen = sizeof(struct ipv6hdr);
1490		hdr.ptr += sizeof(struct ipv6hdr);
1491	} else {
 1492		/* Non-IP pkt, don't estimate header length */
1493		return 0;
1494	}
1495
1496	if (hlen + sizeof(struct tcphdr) > maplen)
1497		return 0;
1498
1499	return (hlen + (hdr.tcp->doff << 2));
1500}
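/* Illustrative result (derived from the code above): for a plain IPv4/TCP
 * frame with no IP or TCP options this returns 20 + 20 = 40 bytes (IP ihl*4
 * plus TCP doff*4); the Ethernet/VLAN header length is only used to locate
 * the IP header and is not part of the returned value.
 */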
1501
1502static int
1503vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1504		       struct vmxnet3_adapter *adapter, int quota)
1505{
1506	u32 rxprod_reg[2] = {
1507		adapter->rx_prod_offset, adapter->rx_prod2_offset
1508	};
1509	u32 num_pkts = 0;
1510	bool skip_page_frags = false;
1511	bool encap_lro = false;
1512	struct Vmxnet3_RxCompDesc *rcd;
1513	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1514	u16 segCnt = 0, mss = 0;
1515	int comp_offset, fill_offset;
1516#ifdef __BIG_ENDIAN_BITFIELD
1517	struct Vmxnet3_RxDesc rxCmdDesc;
1518	struct Vmxnet3_RxCompDesc rxComp;
1519#endif
1520	bool need_flush = false;
1521
1522	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1523			  &rxComp);
1524	while (rcd->gen == rq->comp_ring.gen) {
1525		struct vmxnet3_rx_buf_info *rbi;
1526		struct sk_buff *skb, *new_skb = NULL;
1527		struct page *new_page = NULL;
1528		dma_addr_t new_dma_addr;
1529		int num_to_alloc;
1530		struct Vmxnet3_RxDesc *rxd;
1531		u32 idx, ring_idx;
1532		struct vmxnet3_cmd_ring	*ring = NULL;
1533		if (num_pkts >= quota) {
1534			/* we may stop even before we see the EOP desc of
1535			 * the current pkt
1536			 */
1537			break;
1538		}
1539
1540		/* Prevent any rcd field from being (speculatively) read before
1541		 * rcd->gen is read.
1542		 */
1543		dma_rmb();
1544
1545		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1546		       rcd->rqID != rq->dataRingQid);
1547		idx = rcd->rxdIdx;
1548		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1549		ring = rq->rx_ring + ring_idx;
1550		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1551				  &rxCmdDesc);
1552		rbi = rq->buf_info[ring_idx] + idx;
1553
1554		BUG_ON(rxd->addr != rbi->dma_addr ||
1555		       rxd->len != rbi->len);
1556
1557		if (unlikely(rcd->eop && rcd->err)) {
1558			vmxnet3_rx_error(rq, rcd, ctx, adapter);
1559			goto rcd_done;
1560		}
1561
1562		if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
1563			struct sk_buff *skb_xdp_pass;
1564			int act;
1565
1566			if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
1567				ctx->skb = NULL;
1568				goto skip_xdp; /* Handle it later. */
1569			}
1570
1571			if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
1572				goto rcd_done;
1573
1574			act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
1575						  &skb_xdp_pass);
1576			if (act == XDP_PASS) {
1577				ctx->skb = skb_xdp_pass;
1578				goto sop_done;
1579			}
1580			ctx->skb = NULL;
1581			need_flush |= act == XDP_REDIRECT;
1582
1583			goto rcd_done;
1584		}
1585skip_xdp:
1586
1587		if (rcd->sop) { /* first buf of the pkt */
1588			bool rxDataRingUsed;
1589			u16 len;
1590
1591			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1592			       (rcd->rqID != rq->qid &&
1593				rcd->rqID != rq->dataRingQid));
1594
1595			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
1596			       rbi->buf_type != VMXNET3_RX_BUF_XDP);
1597			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1598
1599			if (unlikely(rcd->len == 0)) {
1600				/* Pretend the rx buffer is skipped. */
1601				BUG_ON(!(rcd->sop && rcd->eop));
1602				netdev_dbg(adapter->netdev,
1603					"rxRing[%u][%u] 0 length\n",
1604					ring_idx, idx);
1605				goto rcd_done;
1606			}
1607
1608			skip_page_frags = false;
1609			ctx->skb = rbi->skb;
1610
1611			rxDataRingUsed =
1612				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1613			len = rxDataRingUsed ? rcd->len : rbi->len;
1614
1615			if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
1616				struct sk_buff *skb_xdp_pass;
1617				size_t sz;
1618				int act;
1619
1620				sz = rcd->rxdIdx * rq->data_ring.desc_size;
1621				act = vmxnet3_process_xdp_small(adapter, rq,
1622								&rq->data_ring.base[sz],
1623								rcd->len,
1624								&skb_xdp_pass);
1625				if (act == XDP_PASS) {
1626					ctx->skb = skb_xdp_pass;
1627					goto sop_done;
1628				}
1629				need_flush |= act == XDP_REDIRECT;
1630
1631				goto rcd_done;
1632			}
1633			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1634							    len);
1635			if (new_skb == NULL) {
1636				/* Skb allocation failed, do not handover this
1637				 * skb to stack. Reuse it. Drop the existing pkt
1638				 */
1639				rq->stats.rx_buf_alloc_failure++;
1640				ctx->skb = NULL;
1641				rq->stats.drop_total++;
1642				skip_page_frags = true;
1643				goto rcd_done;
1644			}
1645
1646			if (rxDataRingUsed && adapter->rxdataring_enabled) {
1647				size_t sz;
1648
1649				BUG_ON(rcd->len > rq->data_ring.desc_size);
1650
1651				ctx->skb = new_skb;
1652				sz = rcd->rxdIdx * rq->data_ring.desc_size;
1653				memcpy(new_skb->data,
1654				       &rq->data_ring.base[sz], rcd->len);
1655			} else {
1656				ctx->skb = rbi->skb;
1657
1658				new_dma_addr =
1659					dma_map_single(&adapter->pdev->dev,
1660						       new_skb->data, rbi->len,
1661						       DMA_FROM_DEVICE);
1662				if (dma_mapping_error(&adapter->pdev->dev,
1663						      new_dma_addr)) {
1664					dev_kfree_skb(new_skb);
1665					/* Skb allocation failed, do not
1666					 * handover this skb to stack. Reuse
1667					 * it. Drop the existing pkt.
1668					 */
1669					rq->stats.rx_buf_alloc_failure++;
1670					ctx->skb = NULL;
1671					rq->stats.drop_total++;
1672					skip_page_frags = true;
1673					goto rcd_done;
1674				}
1675
1676				dma_unmap_single(&adapter->pdev->dev,
1677						 rbi->dma_addr,
1678						 rbi->len,
1679						 DMA_FROM_DEVICE);
1680
1681				/* Immediate refill */
1682				rbi->skb = new_skb;
1683				rbi->dma_addr = new_dma_addr;
1684				rxd->addr = cpu_to_le64(rbi->dma_addr);
1685				rxd->len = rbi->len;
1686			}
1687
 1688			skb_record_rx_queue(ctx->skb, rq->qid);
1689			skb_put(ctx->skb, rcd->len);
1690
1691			if (VMXNET3_VERSION_GE_2(adapter) &&
1692			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1693				struct Vmxnet3_RxCompDescExt *rcdlro;
1694				union Vmxnet3_GenericDesc *gdesc;
1695
1696				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1697				gdesc = (union Vmxnet3_GenericDesc *)rcd;
1698
1699				segCnt = rcdlro->segCnt;
1700				WARN_ON_ONCE(segCnt == 0);
1701				mss = rcdlro->mss;
1702				if (unlikely(segCnt <= 1))
1703					segCnt = 0;
1704				encap_lro = (le32_to_cpu(gdesc->dword[0]) &
1705					(1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
1706			} else {
1707				segCnt = 0;
1708			}
1709		} else {
1710			BUG_ON(ctx->skb == NULL && !skip_page_frags);
1711
1712			/* non SOP buffer must be type 1 in most cases */
1713			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1714			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1715
1716			/* If an sop buffer was dropped, skip all
1717			 * following non-sop fragments. They will be reused.
1718			 */
1719			if (skip_page_frags)
1720				goto rcd_done;
1721
1722			if (rcd->len) {
1723				new_page = alloc_page(GFP_ATOMIC);
1724				/* Replacement page frag could not be allocated.
1725				 * Reuse this page. Drop the pkt and free the
1726				 * skb which contained this page as a frag. Skip
1727				 * processing all the following non-sop frags.
1728				 */
1729				if (unlikely(!new_page)) {
1730					rq->stats.rx_buf_alloc_failure++;
1731					dev_kfree_skb(ctx->skb);
1732					ctx->skb = NULL;
1733					skip_page_frags = true;
1734					goto rcd_done;
1735				}
1736				new_dma_addr = dma_map_page(&adapter->pdev->dev,
1737							    new_page,
1738							    0, PAGE_SIZE,
1739							    DMA_FROM_DEVICE);
1740				if (dma_mapping_error(&adapter->pdev->dev,
1741						      new_dma_addr)) {
1742					put_page(new_page);
1743					rq->stats.rx_buf_alloc_failure++;
1744					dev_kfree_skb(ctx->skb);
1745					ctx->skb = NULL;
1746					skip_page_frags = true;
1747					goto rcd_done;
1748				}
1749
1750				dma_unmap_page(&adapter->pdev->dev,
1751					       rbi->dma_addr, rbi->len,
1752					       DMA_FROM_DEVICE);
1753
1754				vmxnet3_append_frag(ctx->skb, rcd, rbi);
1755
1756				/* Immediate refill */
1757				rbi->page = new_page;
1758				rbi->dma_addr = new_dma_addr;
1759				rxd->addr = cpu_to_le64(rbi->dma_addr);
1760				rxd->len = rbi->len;
1761			}
1762		}
1763
1764
1765sop_done:
1766		skb = ctx->skb;
1767		if (rcd->eop) {
1768			u32 mtu = adapter->netdev->mtu;
1769			skb->len += skb->data_len;
1770
1771#ifdef VMXNET3_RSS
1772			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1773			    (adapter->netdev->features & NETIF_F_RXHASH)) {
1774				enum pkt_hash_types hash_type;
1775
1776				switch (rcd->rssType) {
1777				case VMXNET3_RCD_RSS_TYPE_IPV4:
1778				case VMXNET3_RCD_RSS_TYPE_IPV6:
1779					hash_type = PKT_HASH_TYPE_L3;
1780					break;
1781				case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1782				case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1783				case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1784				case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1785					hash_type = PKT_HASH_TYPE_L4;
1786					break;
1787				default:
1788					hash_type = PKT_HASH_TYPE_L3;
1789					break;
1790				}
1791				skb_set_hash(skb,
1792					     le32_to_cpu(rcd->rssHash),
1793					     hash_type);
1794			}
1795#endif
1796			vmxnet3_rx_csum(adapter, skb,
1797					(union Vmxnet3_GenericDesc *)rcd);
1798			skb->protocol = eth_type_trans(skb, adapter->netdev);
1799			if ((!rcd->tcp && !encap_lro) ||
1800			    !(adapter->netdev->features & NETIF_F_LRO))
1801				goto not_lro;
1802
1803			if (segCnt != 0 && mss != 0) {
1804				skb_shinfo(skb)->gso_type = rcd->v4 ?
1805					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1806				skb_shinfo(skb)->gso_size = mss;
1807				skb_shinfo(skb)->gso_segs = segCnt;
1808			} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
1809				u32 hlen;
1810
1811				hlen = vmxnet3_get_hdr_len(adapter, skb,
1812					(union Vmxnet3_GenericDesc *)rcd);
1813				if (hlen == 0)
1814					goto not_lro;
1815
1816				skb_shinfo(skb)->gso_type =
1817					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1818				if (segCnt != 0) {
1819					skb_shinfo(skb)->gso_segs = segCnt;
1820					skb_shinfo(skb)->gso_size =
1821						DIV_ROUND_UP(skb->len -
1822							hlen, segCnt);
1823				} else {
1824					skb_shinfo(skb)->gso_size = mtu - hlen;
1825				}
1826			}
1827not_lro:
1828			if (unlikely(rcd->ts))
1829				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1830
1831			/* Use GRO callback if UPT is enabled */
1832			if ((adapter->netdev->features & NETIF_F_LRO) &&
1833			    !rq->shared->updateRxProd)
1834				netif_receive_skb(skb);
1835			else
1836				napi_gro_receive(&rq->napi, skb);
1837
1838			ctx->skb = NULL;
1839			encap_lro = false;
1840			num_pkts++;
1841		}
1842
1843rcd_done:
1844		/* device may have skipped some rx descs */
1845		ring = rq->rx_ring + ring_idx;
1846		rbi->comp_state = VMXNET3_RXD_COMP_DONE;
1847
1848		comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
1849		fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
1850			      idx - ring->next2fill - 1;
1851		if (!ring->isOutOfOrder || fill_offset >= comp_offset)
1852			ring->next2comp = idx;
1853		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1854
1855		/* Ensure that the writes to rxd->gen bits will be observed
1856		 * after all other writes to rxd objects.
1857		 */
1858		dma_wmb();
1859
1860		while (num_to_alloc) {
1861			rbi = rq->buf_info[ring_idx] + ring->next2fill;
1862			if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
1863				goto refill_buf;
1864			if (ring_idx == 0) {
1865				/* ring0 Type1 buffers can get skipped; re-fill them */
1866				if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
1867					goto refill_buf;
1868			}
1869			if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
1870refill_buf:
1871				vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1872						  &rxCmdDesc);
1873				WARN_ON(!rxd->addr);
1874
1875				/* Recv desc is ready to be used by the device */
1876				rxd->gen = ring->gen;
1877				vmxnet3_cmd_ring_adv_next2fill(ring);
1878				rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
1879				num_to_alloc--;
1880			} else {
1881				/* rx completion hasn't occurred */
1882				ring->isOutOfOrder = 1;
1883				break;
1884			}
1885		}
1886
1887		if (num_to_alloc == 0) {
1888			ring->isOutOfOrder = 0;
1889		}
1890
1891		/* if needed, update the register */
1892		if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
1893			VMXNET3_WRITE_BAR0_REG(adapter,
1894					       rxprod_reg[ring_idx] + rq->qid * 8,
1895					       ring->next2fill);
1896		}
1897
1898		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1899		vmxnet3_getRxComp(rcd,
1900				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1901	}
1902	if (need_flush)
1903		xdp_do_flush();
1904
1905	return num_pkts;
1906}
1907
1908
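/* Unmap and release every rx buffer still held by both command rings of the
 * queue, then reset the rings' generation and fill/completion indices.
 * Expected to run only while the device is quiesced, so no rx processing
 * races with the cleanup.
 */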
1909static void
1910vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1911		   struct vmxnet3_adapter *adapter)
1912{
1913	u32 i, ring_idx;
1914	struct Vmxnet3_RxDesc *rxd;
1915
1916	/* ring has already been cleaned up */
1917	if (!rq->rx_ring[0].base)
1918		return;
1919
1920	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1921		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1922			struct vmxnet3_rx_buf_info *rbi;
1923#ifdef __BIG_ENDIAN_BITFIELD
1924			struct Vmxnet3_RxDesc rxDesc;
1925#endif
1926
1927			rbi = &rq->buf_info[ring_idx][i];
1928			vmxnet3_getRxDesc(rxd,
1929				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1930
1931			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1932			    rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
1933				page_pool_recycle_direct(rq->page_pool,
1934							 rbi->page);
1935				rbi->page = NULL;
1936			} else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1937				   rbi->skb) {
1938				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1939						 rxd->len, DMA_FROM_DEVICE);
1940				dev_kfree_skb(rbi->skb);
1941				rbi->skb = NULL;
1942			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1943				   rbi->page) {
1944				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1945					       rxd->len, DMA_FROM_DEVICE);
1946				put_page(rbi->page);
1947				rbi->page = NULL;
1948			}
1949		}
1950
1951		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1952		rq->rx_ring[ring_idx].next2fill =
1953					rq->rx_ring[ring_idx].next2comp = 0;
1954	}
1955
1956	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1957	rq->comp_ring.next2proc = 0;
1958}
1959
1960
1961static void
1962vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1963{
1964	int i;
1965
1966	for (i = 0; i < adapter->num_rx_queues; i++)
1967		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1968	rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
1969}
1970
1971
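/* Free the DMA-coherent descriptor rings, the optional rx data ring, the
 * page pool (if one was created) and the buf_info array of one rx queue.
 * All rx buffers must already have been released by vmxnet3_rq_cleanup().
 */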
1972static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1973			       struct vmxnet3_adapter *adapter)
1974{
1975	int i;
1976	int j;
1977
1978	/* all rx buffers must have already been freed */
1979	for (i = 0; i < 2; i++) {
1980		if (rq->buf_info[i]) {
1981			for (j = 0; j < rq->rx_ring[i].size; j++)
1982				BUG_ON(rq->buf_info[i][j].page != NULL);
1983		}
1984	}
1985
1986
1987	for (i = 0; i < 2; i++) {
1988		if (rq->rx_ring[i].base) {
1989			dma_free_coherent(&adapter->pdev->dev,
1990					  rq->rx_ring[i].size
1991					  * sizeof(struct Vmxnet3_RxDesc),
1992					  rq->rx_ring[i].base,
1993					  rq->rx_ring[i].basePA);
1994			rq->rx_ring[i].base = NULL;
1995		}
1996	}
1997
1998	if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
1999		xdp_rxq_info_unreg(&rq->xdp_rxq);
2000	page_pool_destroy(rq->page_pool);
2001	rq->page_pool = NULL;
2002
2003	if (rq->data_ring.base) {
2004		dma_free_coherent(&adapter->pdev->dev,
2005				  rq->rx_ring[0].size * rq->data_ring.desc_size,
2006				  rq->data_ring.base, rq->data_ring.basePA);
2007		rq->data_ring.base = NULL;
2008	}
2009
2010	if (rq->comp_ring.base) {
2011		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
2012				  * sizeof(struct Vmxnet3_RxCompDesc),
2013				  rq->comp_ring.base, rq->comp_ring.basePA);
2014		rq->comp_ring.base = NULL;
2015	}
2016
2017	kfree(rq->buf_info[0]);
2018	rq->buf_info[0] = NULL;
2019	rq->buf_info[1] = NULL;
2020}
2021
2022static void
2023vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
2024{
2025	int i;
2026
2027	for (i = 0; i < adapter->num_rx_queues; i++) {
2028		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2029
2030		if (rq->data_ring.base) {
2031			dma_free_coherent(&adapter->pdev->dev,
2032					  (rq->rx_ring[0].size *
2033					  rq->data_ring.desc_size),
2034					  rq->data_ring.base,
2035					  rq->data_ring.basePA);
2036			rq->data_ring.base = NULL;
2037			rq->data_ring.desc_size = 0;
2038		}
2039	}
2040}
2041
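/* Prepare an rx queue for use: assign buffer types and sizes for both
 * command rings, reset ring state, create the page pool and pre-fill the
 * rings with receive buffers. Returns -ENOMEM if not even one buffer could
 * be allocated for ring 0.
 */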
2042static int
2043vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
2044		struct vmxnet3_adapter  *adapter)
2045{
2046	int i, err;
2047
2048	/* initialize buf_info */
2049	for (i = 0; i < rq->rx_ring[0].size; i++) {
2050
2051		/* 1st buf for a pkt is skbuff or xdp page */
2052		if (i % adapter->rx_buf_per_pkt == 0) {
2053			rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
2054						      VMXNET3_RX_BUF_XDP :
2055						      VMXNET3_RX_BUF_SKB;
2056			rq->buf_info[0][i].len = adapter->skb_buf_size;
2057		} else { /* subsequent bufs for a pkt are frags */
2058			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
2059			rq->buf_info[0][i].len = PAGE_SIZE;
2060		}
2061	}
2062	for (i = 0; i < rq->rx_ring[1].size; i++) {
2063		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
2064		rq->buf_info[1][i].len = PAGE_SIZE;
2065	}
2066
2067	/* reset internal state and allocate buffers for both rings */
2068	for (i = 0; i < 2; i++) {
2069		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
2070
2071		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
2072		       sizeof(struct Vmxnet3_RxDesc));
2073		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
2074		rq->rx_ring[i].isOutOfOrder = 0;
2075	}
2076
2077	err = vmxnet3_create_pp(adapter, rq,
2078				rq->rx_ring[0].size + rq->rx_ring[1].size);
2079	if (err)
2080		return err;
2081
2082	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
2083				    adapter) == 0) {
2084		xdp_rxq_info_unreg(&rq->xdp_rxq);
2085		page_pool_destroy(rq->page_pool);
2086		rq->page_pool = NULL;
2087
2088		/* need at least 1 rx buffer for the 1st ring */
2089		return -ENOMEM;
2090	}
2091	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
2092
2093	/* reset the comp ring */
2094	rq->comp_ring.next2proc = 0;
2095	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
2096	       sizeof(struct Vmxnet3_RxCompDesc));
2097	rq->comp_ring.gen = VMXNET3_INIT_GEN;
2098
2099	/* reset rxctx */
2100	rq->rx_ctx.skb = NULL;
2101
2102	/* stats are not reset */
2103	return 0;
2104}
2105
2106
2107static int
2108vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
2109{
2110	int i, err = 0;
2111
2112	for (i = 0; i < adapter->num_rx_queues; i++) {
2113		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
2114		if (unlikely(err)) {
2115			dev_err(&adapter->netdev->dev, "%s: failed to "
2116				"initialize rx queue%i\n",
2117				adapter->netdev->name, i);
2118			break;
2119		}
2120	}
2121	return err;
2122
2123}
2124
2125
2126static int
2127vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
2128{
2129	int i;
2130	size_t sz;
2131	struct vmxnet3_rx_buf_info *bi;
2132
2133	for (i = 0; i < 2; i++) {
2134
2135		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
2136		rq->rx_ring[i].base = dma_alloc_coherent(
2137						&adapter->pdev->dev, sz,
2138						&rq->rx_ring[i].basePA,
2139						GFP_KERNEL);
2140		if (!rq->rx_ring[i].base) {
2141			netdev_err(adapter->netdev,
2142				   "failed to allocate rx ring %d\n", i);
2143			goto err;
2144		}
2145	}
2146
2147	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
2148		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
2149		rq->data_ring.base =
2150			dma_alloc_coherent(&adapter->pdev->dev, sz,
2151					   &rq->data_ring.basePA,
2152					   GFP_KERNEL);
2153		if (!rq->data_ring.base) {
2154			netdev_err(adapter->netdev,
2155				   "rx data ring will be disabled\n");
2156			adapter->rxdataring_enabled = false;
2157		}
2158	} else {
2159		rq->data_ring.base = NULL;
2160		rq->data_ring.desc_size = 0;
2161	}
2162
2163	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
2164	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2165						&rq->comp_ring.basePA,
2166						GFP_KERNEL);
2167	if (!rq->comp_ring.base) {
2168		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
2169		goto err;
2170	}
2171
2172	bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2173			  sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2174			  dev_to_node(&adapter->pdev->dev));
2175	if (!bi)
2176		goto err;
2177
2178	rq->buf_info[0] = bi;
2179	rq->buf_info[1] = bi + rq->rx_ring[0].size;
2180
2181	return 0;
2182
2183err:
2184	vmxnet3_rq_destroy(rq, adapter);
2185	return -ENOMEM;
2186}
2187
2188
2189int
2190vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
2191{
2192	int i, err = 0;
2193
2194	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2195
2196	for (i = 0; i < adapter->num_rx_queues; i++) {
2197		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
2198		if (unlikely(err)) {
2199			dev_err(&adapter->netdev->dev,
2200				"%s: failed to create rx queue%i\n",
2201				adapter->netdev->name, i);
2202			goto err_out;
2203		}
2204	}
2205
2206	if (!adapter->rxdataring_enabled)
2207		vmxnet3_rq_destroy_all_rxdataring(adapter);
2208
2209	return err;
2210err_out:
2211	vmxnet3_rq_destroy_all(adapter);
2212	return err;
2213
2214}
2215
2216/* Multiple queue aware polling function for tx and rx */
2217
2218static int
2219vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2220{
2221	int rcd_done = 0, i;
2222	if (unlikely(adapter->shared->ecr))
2223		vmxnet3_process_events(adapter);
2224	for (i = 0; i < adapter->num_tx_queues; i++)
2225		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2226
2227	for (i = 0; i < adapter->num_rx_queues; i++)
2228		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
2229						   adapter, budget);
2230	return rcd_done;
2231}
2232
2233
2234static int
2235vmxnet3_poll(struct napi_struct *napi, int budget)
2236{
2237	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
2238					  struct vmxnet3_rx_queue, napi);
2239	int rxd_done;
2240
2241	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2242
2243	if (rxd_done < budget) {
2244		napi_complete_done(napi, rxd_done);
2245		vmxnet3_enable_all_intrs(rx_queue->adapter);
2246	}
2247	return rxd_done;
2248}
2249
2250/*
2251 * NAPI polling function for MSI-X mode with multiple Rx queues
2252 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
2253 */
2254
2255static int
2256vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
2257{
2258	struct vmxnet3_rx_queue *rq = container_of(napi,
2259						struct vmxnet3_rx_queue, napi);
2260	struct vmxnet3_adapter *adapter = rq->adapter;
2261	int rxd_done;
2262
2263	/* When sharing interrupt with corresponding tx queue, process
2264	 * tx completions in that queue as well
2265	 */
2266	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
2267		struct vmxnet3_tx_queue *tq =
2268				&adapter->tx_queue[rq - adapter->rx_queue];
2269		vmxnet3_tq_tx_complete(tq, adapter);
2270	}
2271
2272	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2273
2274	if (rxd_done < budget) {
2275		napi_complete_done(napi, rxd_done);
2276		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2277	}
2278	return rxd_done;
2279}
2280
2281
2282#ifdef CONFIG_PCI_MSI
2283
2284/*
2285 * Handle completion interrupts on tx queues
2286 * Returns whether or not the intr is handled
2287 */
2288
2289static irqreturn_t
2290vmxnet3_msix_tx(int irq, void *data)
2291{
2292	struct vmxnet3_tx_queue *tq = data;
2293	struct vmxnet3_adapter *adapter = tq->adapter;
2294
2295	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2296		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2297
2298	/* Handle the case where only one irq is allocated for all tx queues */
2299	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2300		int i;
2301		for (i = 0; i < adapter->num_tx_queues; i++) {
2302			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2303			vmxnet3_tq_tx_complete(txq, adapter);
2304		}
2305	} else {
2306		vmxnet3_tq_tx_complete(tq, adapter);
2307	}
2308	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2309
2310	return IRQ_HANDLED;
2311}
2312
2313
2314/*
2315 * Handle completion interrupts on rx queues. Returns whether or not the
2316 * intr is handled
2317 */
2318
2319static irqreturn_t
2320vmxnet3_msix_rx(int irq, void *data)
2321{
2322	struct vmxnet3_rx_queue *rq = data;
2323	struct vmxnet3_adapter *adapter = rq->adapter;
2324
2325	/* disable intr if needed */
2326	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2327		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2328	napi_schedule(&rq->napi);
2329
2330	return IRQ_HANDLED;
2331}
2332
2333/*
2334 *----------------------------------------------------------------------------
2335 *
2336 * vmxnet3_msix_event --
2337 *
2338 *    vmxnet3 msix event intr handler
2339 *
2340 * Result:
2341 *    whether or not the intr is handled
2342 *
2343 *----------------------------------------------------------------------------
2344 */
2345
2346static irqreturn_t
2347vmxnet3_msix_event(int irq, void *data)
2348{
2349	struct net_device *dev = data;
2350	struct vmxnet3_adapter *adapter = netdev_priv(dev);
2351
2352	/* disable intr if needed */
2353	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2354		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2355
2356	if (adapter->shared->ecr)
2357		vmxnet3_process_events(adapter);
2358
2359	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2360
2361	return IRQ_HANDLED;
2362}
2363
2364#endif /* CONFIG_PCI_MSI  */
2365
2366
2367/* Interrupt handler for vmxnet3  */
2368static irqreturn_t
2369vmxnet3_intr(int irq, void *dev_id)
2370{
2371	struct net_device *dev = dev_id;
2372	struct vmxnet3_adapter *adapter = netdev_priv(dev);
2373
2374	if (adapter->intr.type == VMXNET3_IT_INTX) {
2375		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2376		if (unlikely(icr == 0))
2377			/* not ours */
2378			return IRQ_NONE;
2379	}
2380
2381
2382	/* disable intr if needed */
2383	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2384		vmxnet3_disable_all_intrs(adapter);
2385
2386	napi_schedule(&adapter->rx_queue[0].napi);
2387
2388	return IRQ_HANDLED;
2389}
2390
2391#ifdef CONFIG_NET_POLL_CONTROLLER
2392
2393/* netpoll callback. */
2394static void
2395vmxnet3_netpoll(struct net_device *netdev)
2396{
2397	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2398
2399	switch (adapter->intr.type) {
2400#ifdef CONFIG_PCI_MSI
2401	case VMXNET3_IT_MSIX: {
2402		int i;
2403		for (i = 0; i < adapter->num_rx_queues; i++)
2404			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2405		break;
2406	}
2407#endif
2408	case VMXNET3_IT_MSI:
2409	default:
2410		vmxnet3_intr(0, adapter->netdev);
2411		break;
2412	}
2413
2414}
2415#endif	/* CONFIG_NET_POLL_CONTROLLER */
2416
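/* Request one interrupt per tx/rx queue plus an event interrupt in MSI-X
 * mode, or a single (possibly shared) interrupt for MSI/INTx. Also assigns
 * the per-queue completion-ring interrupt indices and the rx queue ids once
 * the final number of rx queues is known.
 */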
2417static int
2418vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2419{
2420	struct vmxnet3_intr *intr = &adapter->intr;
2421	int err = 0, i;
2422	int vector = 0;
2423
2424#ifdef CONFIG_PCI_MSI
2425	if (adapter->intr.type == VMXNET3_IT_MSIX) {
2426		for (i = 0; i < adapter->num_tx_queues; i++) {
2427			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2428				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2429					adapter->netdev->name, vector);
2430				err = request_irq(
2431					      intr->msix_entries[vector].vector,
2432					      vmxnet3_msix_tx, 0,
2433					      adapter->tx_queue[i].name,
2434					      &adapter->tx_queue[i]);
2435			} else {
2436				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2437					adapter->netdev->name, vector);
2438			}
2439			if (err) {
2440				dev_err(&adapter->netdev->dev,
2441					"Failed to request irq for MSIX, %s, "
2442					"error %d\n",
2443					adapter->tx_queue[i].name, err);
2444				return err;
2445			}
2446
2447			/* Handle the case where only 1 MSIx was allocated for
2448			 * all tx queues */
2449			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2450				for (; i < adapter->num_tx_queues; i++)
2451					adapter->tx_queue[i].comp_ring.intr_idx
2452								= vector;
2453				vector++;
2454				break;
2455			} else {
2456				adapter->tx_queue[i].comp_ring.intr_idx
2457								= vector++;
2458			}
2459		}
2460		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2461			vector = 0;
2462
2463		for (i = 0; i < adapter->num_rx_queues; i++) {
2464			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2465				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2466					adapter->netdev->name, vector);
2467			else
2468				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2469					adapter->netdev->name, vector);
2470			err = request_irq(intr->msix_entries[vector].vector,
2471					  vmxnet3_msix_rx, 0,
2472					  adapter->rx_queue[i].name,
2473					  &(adapter->rx_queue[i]));
2474			if (err) {
2475				netdev_err(adapter->netdev,
2476					   "Failed to request irq for MSIX, "
2477					   "%s, error %d\n",
2478					   adapter->rx_queue[i].name, err);
2479				return err;
2480			}
2481
2482			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2483		}
2484
2485		sprintf(intr->event_msi_vector_name, "%s-event-%d",
2486			adapter->netdev->name, vector);
2487		err = request_irq(intr->msix_entries[vector].vector,
2488				  vmxnet3_msix_event, 0,
2489				  intr->event_msi_vector_name, adapter->netdev);
2490		intr->event_intr_idx = vector;
2491
2492	} else if (intr->type == VMXNET3_IT_MSI) {
2493		adapter->num_rx_queues = 1;
2494		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2495				  adapter->netdev->name, adapter->netdev);
2496	} else {
2497#endif
2498		adapter->num_rx_queues = 1;
2499		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2500				  IRQF_SHARED, adapter->netdev->name,
2501				  adapter->netdev);
2502#ifdef CONFIG_PCI_MSI
2503	}
2504#endif
2505	intr->num_intrs = vector + 1;
2506	if (err) {
2507		netdev_err(adapter->netdev,
2508			   "Failed to request irq (intr type:%d), error %d\n",
2509			   intr->type, err);
2510	} else {
2511		/* Number of rx queues will not change after this */
2512		for (i = 0; i < adapter->num_rx_queues; i++) {
2513			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2514			rq->qid = i;
2515			rq->qid2 = i + adapter->num_rx_queues;
2516			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2517		}
2518
2519		/* init our intr settings */
2520		for (i = 0; i < intr->num_intrs; i++)
2521			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2522		if (adapter->intr.type != VMXNET3_IT_MSIX) {
2523			adapter->intr.event_intr_idx = 0;
2524			for (i = 0; i < adapter->num_tx_queues; i++)
2525				adapter->tx_queue[i].comp_ring.intr_idx = 0;
2526			adapter->rx_queue[0].comp_ring.intr_idx = 0;
2527		}
2528
2529		netdev_info(adapter->netdev,
2530			    "intr type %u, mode %u, %u vectors allocated\n",
2531			    intr->type, intr->mask_mode, intr->num_intrs);
2532	}
2533
2534	return err;
2535}
2536
2537
2538static void
2539vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2540{
2541	struct vmxnet3_intr *intr = &adapter->intr;
2542	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2543
2544	switch (intr->type) {
2545#ifdef CONFIG_PCI_MSI
2546	case VMXNET3_IT_MSIX:
2547	{
2548		int i, vector = 0;
2549
2550		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2551			for (i = 0; i < adapter->num_tx_queues; i++) {
2552				free_irq(intr->msix_entries[vector++].vector,
2553					 &(adapter->tx_queue[i]));
2554				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2555					break;
2556			}
2557		}
2558
2559		for (i = 0; i < adapter->num_rx_queues; i++) {
2560			free_irq(intr->msix_entries[vector++].vector,
2561				 &(adapter->rx_queue[i]));
2562		}
2563
2564		free_irq(intr->msix_entries[vector].vector,
2565			 adapter->netdev);
2566		BUG_ON(vector >= intr->num_intrs);
2567		break;
2568	}
2569#endif
2570	case VMXNET3_IT_MSI:
2571		free_irq(adapter->pdev->irq, adapter->netdev);
2572		break;
2573	case VMXNET3_IT_INTX:
2574		free_irq(adapter->pdev->irq, adapter->netdev);
2575		break;
2576	default:
2577		BUG();
2578	}
2579}
2580
2581
2582static void
2583vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2584{
2585	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2586	u16 vid;
2587
2588	/* allow untagged pkts */
2589	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2590
2591	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2592		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2593}
2594
2595
2596static int
2597vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2598{
2599	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2600
2601	if (!(netdev->flags & IFF_PROMISC)) {
2602		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2603		unsigned long flags;
2604
2605		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2606		spin_lock_irqsave(&adapter->cmd_lock, flags);
2607		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2608				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2609		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2610	}
2611
2612	set_bit(vid, adapter->active_vlans);
2613
2614	return 0;
2615}
2616
2617
2618static int
2619vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2620{
2621	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2622
2623	if (!(netdev->flags & IFF_PROMISC)) {
2624		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2625		unsigned long flags;
2626
2627		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2628		spin_lock_irqsave(&adapter->cmd_lock, flags);
2629		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2630				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2631		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2632	}
2633
2634	clear_bit(vid, adapter->active_vlans);
2635
2636	return 0;
2637}
2638
2639
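/* Copy the netdev multicast list into a flat, kmalloc'ed array of MAC
 * addresses that can be handed to the device. Returns NULL if the list is
 * too large for the 16-bit mfTableLen field or if the allocation fails.
 */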
2640static u8 *
2641vmxnet3_copy_mc(struct net_device *netdev)
2642{
2643	u8 *buf = NULL;
2644	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2645
2646	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2647	if (sz <= 0xffff) {
2648		/* We may be called with BH disabled */
2649		buf = kmalloc(sz, GFP_ATOMIC);
2650		if (buf) {
2651			struct netdev_hw_addr *ha;
2652			int i = 0;
2653
2654			netdev_for_each_mc_addr(ha, netdev)
2655				memcpy(buf + i++ * ETH_ALEN, ha->addr,
2656				       ETH_ALEN);
2657		}
2658	}
2659	return buf;
2660}
2661
2662
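/* Rebuild the device rx filter mode from the netdev flags (promiscuous,
 * broadcast, allmulti) and the multicast list, DMA-map the multicast table
 * if one is used, and tell the device about the new mode and filters.
 */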
2663static void
2664vmxnet3_set_mc(struct net_device *netdev)
2665{
2666	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2667	unsigned long flags;
2668	struct Vmxnet3_RxFilterConf *rxConf =
2669					&adapter->shared->devRead.rxFilterConf;
2670	u8 *new_table = NULL;
2671	dma_addr_t new_table_pa = 0;
2672	bool new_table_pa_valid = false;
2673	u32 new_mode = VMXNET3_RXM_UCAST;
2674
2675	if (netdev->flags & IFF_PROMISC) {
2676		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2677		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2678
2679		new_mode |= VMXNET3_RXM_PROMISC;
2680	} else {
2681		vmxnet3_restore_vlan(adapter);
2682	}
2683
2684	if (netdev->flags & IFF_BROADCAST)
2685		new_mode |= VMXNET3_RXM_BCAST;
2686
2687	if (netdev->flags & IFF_ALLMULTI)
2688		new_mode |= VMXNET3_RXM_ALL_MULTI;
2689	else
2690		if (!netdev_mc_empty(netdev)) {
2691			new_table = vmxnet3_copy_mc(netdev);
2692			if (new_table) {
2693				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2694
2695				rxConf->mfTableLen = cpu_to_le16(sz);
2696				new_table_pa = dma_map_single(
2697							&adapter->pdev->dev,
2698							new_table,
2699							sz,
2700							DMA_TO_DEVICE);
2701				if (!dma_mapping_error(&adapter->pdev->dev,
2702						       new_table_pa)) {
2703					new_mode |= VMXNET3_RXM_MCAST;
2704					new_table_pa_valid = true;
2705					rxConf->mfTablePA = cpu_to_le64(
2706								new_table_pa);
2707				}
2708			}
2709			if (!new_table_pa_valid) {
2710				netdev_info(netdev,
2711					    "failed to copy mcast list, setting ALL_MULTI\n");
2712				new_mode |= VMXNET3_RXM_ALL_MULTI;
2713			}
2714		}
2715
2716	if (!(new_mode & VMXNET3_RXM_MCAST)) {
2717		rxConf->mfTableLen = 0;
2718		rxConf->mfTablePA = 0;
2719	}
2720
2721	spin_lock_irqsave(&adapter->cmd_lock, flags);
2722	if (new_mode != rxConf->rxMode) {
2723		rxConf->rxMode = cpu_to_le32(new_mode);
2724		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2725				       VMXNET3_CMD_UPDATE_RX_MODE);
2726		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2727				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2728	}
2729
2730	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2731			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
2732	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2733
2734	if (new_table_pa_valid)
2735		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2736				 rxConf->mfTableLen, DMA_TO_DEVICE);
2737	kfree(new_table);
2738}
2739
2740void
2741vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2742{
2743	int i;
2744
2745	for (i = 0; i < adapter->num_rx_queues; i++)
2746		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2747}
2748
2749
2750/*
2751 *   Set up driver_shared based on settings in adapter.
2752 */
2753
2754static void
2755vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2756{
2757	struct Vmxnet3_DriverShared *shared = adapter->shared;
2758	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2759	struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2760	struct Vmxnet3_TxQueueConf *tqc;
2761	struct Vmxnet3_RxQueueConf *rqc;
2762	int i;
2763
2764	memset(shared, 0, sizeof(*shared));
2765
2766	/* driver settings */
2767	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2768	devRead->misc.driverInfo.version = cpu_to_le32(
2769						VMXNET3_DRIVER_VERSION_NUM);
2770	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2771				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2772	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2773	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2774				*((u32 *)&devRead->misc.driverInfo.gos));
2775	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2776	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2777
2778	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2779	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2780
2781	/* set up feature flags */
2782	if (adapter->netdev->features & NETIF_F_RXCSUM)
2783		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2784
2785	if (adapter->netdev->features & NETIF_F_LRO) {
2786		devRead->misc.uptFeatures |= UPT1_F_LRO;
2787		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2788	}
2789	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2790		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2791
2792	if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2793					 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2794		devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2795
2796	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2797	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2798	devRead->misc.queueDescLen = cpu_to_le32(
2799		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2800		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2801
2802	/* tx queue settings */
2803	devRead->misc.numTxQueues =  adapter->num_tx_queues;
2804	for (i = 0; i < adapter->num_tx_queues; i++) {
2805		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
2806		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2807		tqc = &adapter->tqd_start[i].conf;
2808		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
2809		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2810		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2811		tqc->ddPA           = cpu_to_le64(~0ULL);
2812		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
2813		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
2814		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2815		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
2816		tqc->ddLen          = cpu_to_le32(0);
2817		tqc->intrIdx        = tq->comp_ring.intr_idx;
2818	}
2819
2820	/* rx queue settings */
2821	devRead->misc.numRxQueues = adapter->num_rx_queues;
2822	for (i = 0; i < adapter->num_rx_queues; i++) {
2823		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
2824		rqc = &adapter->rqd_start[i].conf;
2825		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2826		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2827		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2828		rqc->ddPA            = cpu_to_le64(~0ULL);
2829		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
2830		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
2831		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
2832		rqc->ddLen           = cpu_to_le32(0);
2833		rqc->intrIdx         = rq->comp_ring.intr_idx;
2834		if (VMXNET3_VERSION_GE_3(adapter)) {
2835			rqc->rxDataRingBasePA =
2836				cpu_to_le64(rq->data_ring.basePA);
2837			rqc->rxDataRingDescSize =
2838				cpu_to_le16(rq->data_ring.desc_size);
2839		}
2840	}
2841
2842#ifdef VMXNET3_RSS
2843	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2844
2845	if (adapter->rss) {
2846		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2847
2848		devRead->misc.uptFeatures |= UPT1_F_RSS;
2849		devRead->misc.numRxQueues = adapter->num_rx_queues;
2850		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2851				    UPT1_RSS_HASH_TYPE_IPV4 |
2852				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2853				    UPT1_RSS_HASH_TYPE_IPV6;
2854		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2855		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2856		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2857		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2858
2859		for (i = 0; i < rssConf->indTableSize; i++)
2860			rssConf->indTable[i] = ethtool_rxfh_indir_default(
2861				i, adapter->num_rx_queues);
2862
2863		devRead->rssConfDesc.confVer = 1;
2864		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2865		devRead->rssConfDesc.confPA =
2866			cpu_to_le64(adapter->rss_conf_pa);
2867	}
2868
2869#endif /* VMXNET3_RSS */
2870
2871	/* intr settings */
2872	if (!VMXNET3_VERSION_GE_6(adapter) ||
2873	    !adapter->queuesExtEnabled) {
2874		devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2875					     VMXNET3_IMM_AUTO;
2876		devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2877		for (i = 0; i < adapter->intr.num_intrs; i++)
2878			devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2879
2880		devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2881		devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2882	} else {
2883		devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2884						   VMXNET3_IMM_AUTO;
2885		devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2886		for (i = 0; i < adapter->intr.num_intrs; i++)
2887			devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2888
2889		devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2890		devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2891	}
2892
2893	/* rx filter settings */
2894	devRead->rxFilterConf.rxMode = 0;
2895	vmxnet3_restore_vlan(adapter);
2896	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2897
2898	/* the rest are already zeroed */
2899}
2900
2901static void
2902vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
2903{
2904	struct Vmxnet3_DriverShared *shared = adapter->shared;
2905	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2906	unsigned long flags;
2907
2908	if (!VMXNET3_VERSION_GE_7(adapter))
2909		return;
2910
2911	cmdInfo->ringBufSize = adapter->ringBufSize;
2912	spin_lock_irqsave(&adapter->cmd_lock, flags);
2913	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2914			       VMXNET3_CMD_SET_RING_BUFFER_SIZE);
2915	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2916}
2917
2918static void
2919vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2920{
2921	struct Vmxnet3_DriverShared *shared = adapter->shared;
2922	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2923	unsigned long flags;
2924
2925	if (!VMXNET3_VERSION_GE_3(adapter))
2926		return;
2927
2928	spin_lock_irqsave(&adapter->cmd_lock, flags);
2929	cmdInfo->varConf.confVer = 1;
2930	cmdInfo->varConf.confLen =
2931		cpu_to_le32(sizeof(*adapter->coal_conf));
2932	cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
2933
2934	if (adapter->default_coal_mode) {
2935		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2936				       VMXNET3_CMD_GET_COALESCE);
2937	} else {
2938		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2939				       VMXNET3_CMD_SET_COALESCE);
2940	}
2941
2942	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2943}
2944
2945static void
2946vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2947{
2948	struct Vmxnet3_DriverShared *shared = adapter->shared;
2949	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2950	unsigned long flags;
2951
2952	if (!VMXNET3_VERSION_GE_4(adapter))
2953		return;
2954
2955	spin_lock_irqsave(&adapter->cmd_lock, flags);
2956
2957	if (adapter->default_rss_fields) {
2958		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2959				       VMXNET3_CMD_GET_RSS_FIELDS);
2960		adapter->rss_fields =
2961			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2962	} else {
2963		if (VMXNET3_VERSION_GE_7(adapter)) {
2964			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
2965			     adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
2966			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2967						       VMXNET3_CAP_UDP_RSS)) {
2968				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
2969			} else {
2970				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
2971			}
2972
2973			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
2974			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2975						       VMXNET3_CAP_ESP_RSS_IPV4)) {
2976				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
2977			} else {
2978				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
2979			}
2980
2981			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
2982			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2983						       VMXNET3_CAP_ESP_RSS_IPV6)) {
2984				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
2985			} else {
2986				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
2987			}
2988
2989			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
2990			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
2991			adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2992		}
2993		cmdInfo->setRssFields = adapter->rss_fields;
2994		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2995				       VMXNET3_CMD_SET_RSS_FIELDS);
2996		/* Not all requested RSS may get applied, so get and
2997		 * cache what was actually applied.
2998		 */
2999		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3000				       VMXNET3_CMD_GET_RSS_FIELDS);
3001		adapter->rss_fields =
3002			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3003	}
3004
3005	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3006}
3007
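/* Bring the device into the active state: initialize the tx/rx queues,
 * request irqs, populate the shared memory area and issue ACTIVATE_DEV.
 * On success the rx producer registers are primed, the rx filter is applied
 * and interrupts are enabled.
 */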
3008int
3009vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
3010{
3011	int err, i;
3012	u32 ret;
3013	unsigned long flags;
3014
3015	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
3016		" ring sizes %u %u %u\n", adapter->netdev->name,
3017		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
3018		adapter->tx_queue[0].tx_ring.size,
3019		adapter->rx_queue[0].rx_ring[0].size,
3020		adapter->rx_queue[0].rx_ring[1].size);
3021
3022	vmxnet3_tq_init_all(adapter);
3023	err = vmxnet3_rq_init_all(adapter);
3024	if (err) {
3025		netdev_err(adapter->netdev,
3026			   "Failed to init rx queue error %d\n", err);
3027		goto rq_err;
3028	}
3029
3030	err = vmxnet3_request_irqs(adapter);
3031	if (err) {
3032		netdev_err(adapter->netdev,
3033			   "Failed to set up irqs: error %d\n", err);
3034		goto irq_err;
3035	}
3036
3037	vmxnet3_setup_driver_shared(adapter);
3038
3039	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
3040			       adapter->shared_pa));
3041	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
3042			       adapter->shared_pa));
3043	spin_lock_irqsave(&adapter->cmd_lock, flags);
3044	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3045			       VMXNET3_CMD_ACTIVATE_DEV);
3046	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3047	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3048
3049	if (ret != 0) {
3050		netdev_err(adapter->netdev,
3051			   "Failed to activate dev: error %u\n", ret);
3052		err = -EINVAL;
3053		goto activate_err;
3054	}
3055
3056	vmxnet3_init_bufsize(adapter);
3057	vmxnet3_init_coalesce(adapter);
3058	vmxnet3_init_rssfields(adapter);
3059
3060	for (i = 0; i < adapter->num_rx_queues; i++) {
3061		VMXNET3_WRITE_BAR0_REG(adapter,
3062				adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
3063				adapter->rx_queue[i].rx_ring[0].next2fill);
3064		VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
3065				(i * VMXNET3_REG_ALIGN)),
3066				adapter->rx_queue[i].rx_ring[1].next2fill);
3067	}
3068
3069	/* Apply the rx filter settings last. */
3070	vmxnet3_set_mc(adapter->netdev);
3071
3072	/*
3073	 * Check link state when first activating device. It will start the
3074	 * tx queue if the link is up.
3075	 */
3076	vmxnet3_check_link(adapter, true);
3077	netif_tx_wake_all_queues(adapter->netdev);
3078	for (i = 0; i < adapter->num_rx_queues; i++)
3079		napi_enable(&adapter->rx_queue[i].napi);
3080	vmxnet3_enable_all_intrs(adapter);
3081	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3082	return 0;
3083
3084activate_err:
3085	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
3086	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
3087	vmxnet3_free_irqs(adapter);
3088irq_err:
3089rq_err:
3090	/* free up buffers we allocated */
3091	vmxnet3_rq_cleanup_all(adapter);
3092	return err;
3093}
3094
3095
3096void
3097vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
3098{
3099	unsigned long flags;
3100	spin_lock_irqsave(&adapter->cmd_lock, flags);
3101	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
3102	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3103}
3104
3105
3106int
3107vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
3108{
3109	int i;
3110	unsigned long flags;
3111	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
3112		return 0;
3113
3114
3115	spin_lock_irqsave(&adapter->cmd_lock, flags);
3116	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3117			       VMXNET3_CMD_QUIESCE_DEV);
3118	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3119	vmxnet3_disable_all_intrs(adapter);
3120
3121	for (i = 0; i < adapter->num_rx_queues; i++)
3122		napi_disable(&adapter->rx_queue[i].napi);
3123	netif_tx_disable(adapter->netdev);
3124	adapter->link_speed = 0;
3125	netif_carrier_off(adapter->netdev);
3126
3127	vmxnet3_tq_cleanup_all(adapter);
3128	vmxnet3_rq_cleanup_all(adapter);
3129	vmxnet3_free_irqs(adapter);
3130	return 0;
3131}
3132
3133
3134static void
3135vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
3136{
3137	u32 tmp;
3138
3139	tmp = *(u32 *)mac;
3140	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
3141
3142	tmp = (mac[5] << 8) | mac[4];
3143	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
3144}
3145
3146
3147static int
3148vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
3149{
3150	struct sockaddr *addr = p;
3151	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3152
3153	dev_addr_set(netdev, addr->sa_data);
3154	vmxnet3_write_mac_addr(adapter, addr->sa_data);
3155
3156	return 0;
3157}
3158
3159
3160/* ==================== initialization and cleanup routines ============ */
3161
3162static int
3163vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
3164{
3165	int err;
3166	unsigned long mmio_start, mmio_len;
3167	struct pci_dev *pdev = adapter->pdev;
3168
3169	err = pci_enable_device(pdev);
3170	if (err) {
3171		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3172		return err;
3173	}
3174
3175	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
3176					   vmxnet3_driver_name);
3177	if (err) {
3178		dev_err(&pdev->dev,
3179			"Failed to request region for adapter: error %d\n", err);
3180		goto err_enable_device;
3181	}
3182
3183	pci_set_master(pdev);
3184
3185	mmio_start = pci_resource_start(pdev, 0);
3186	mmio_len = pci_resource_len(pdev, 0);
3187	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3188	if (!adapter->hw_addr0) {
3189		dev_err(&pdev->dev, "Failed to map bar0\n");
3190		err = -EIO;
3191		goto err_ioremap;
3192	}
3193
3194	mmio_start = pci_resource_start(pdev, 1);
3195	mmio_len = pci_resource_len(pdev, 1);
3196	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3197	if (!adapter->hw_addr1) {
3198		dev_err(&pdev->dev, "Failed to map bar1\n");
3199		err = -EIO;
3200		goto err_bar1;
3201	}
3202	return 0;
3203
3204err_bar1:
3205	iounmap(adapter->hw_addr0);
3206err_ioremap:
3207	pci_release_selected_regions(pdev, (1 << 2) - 1);
3208err_enable_device:
3209	pci_disable_device(pdev);
3210	return err;
3211}
3212
3213
3214static void
3215vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3216{
3217	BUG_ON(!adapter->pdev);
3218
3219	iounmap(adapter->hw_addr0);
3220	iounmap(adapter->hw_addr1);
3221	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3222	pci_disable_device(adapter->pdev);
3223}
3224
3225
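/* Derive skb_buf_size and rx_buf_per_pkt from the current MTU, then round
 * the rx ring sizes to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
 * (and, for v7 and later, down to a power of two) for every rx queue.
 */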
3226void
3227vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3228{
3229	size_t sz, i, ring0_size, ring1_size, comp_size;
3230	/* With version7 ring1 will have only T0 buffers */
3231	if (!VMXNET3_VERSION_GE_7(adapter)) {
3232		if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3233					    VMXNET3_MAX_ETH_HDR_SIZE) {
3234			adapter->skb_buf_size = adapter->netdev->mtu +
3235						VMXNET3_MAX_ETH_HDR_SIZE;
3236			if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3237				adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3238
3239			adapter->rx_buf_per_pkt = 1;
3240		} else {
3241			adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3242			sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3243						    VMXNET3_MAX_ETH_HDR_SIZE;
3244			adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3245		}
3246	} else {
3247		adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3248					    VMXNET3_MAX_SKB_BUF_SIZE);
3249		adapter->rx_buf_per_pkt = 1;
3250		adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3251		adapter->ringBufSize.ring1BufSizeType1 = 0;
3252		adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3253	}
3254
3255	/*
3256	 * for simplicity, force the ring0 size to be a multiple of
3257	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3258	 */
3259	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3260	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3261	ring0_size = (ring0_size + sz - 1) / sz * sz;
3262	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
3263			   sz * sz);
3264	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3265	ring1_size = (ring1_size + sz - 1) / sz * sz;
3266	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
3267			   sz * sz);
3268	/* For v7 and later, keep ring size power of 2 for UPT */
3269	if (VMXNET3_VERSION_GE_7(adapter)) {
3270		ring0_size = rounddown_pow_of_two(ring0_size);
3271		ring1_size = rounddown_pow_of_two(ring1_size);
3272	}
3273	comp_size = ring0_size + ring1_size;
3274
3275	for (i = 0; i < adapter->num_rx_queues; i++) {
3276		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
3277
3278		rq->rx_ring[0].size = ring0_size;
3279		rq->rx_ring[1].size = ring1_size;
3280		rq->comp_ring.size = comp_size;
3281	}
3282}
3283
3284
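/* Allocate the tx and rx queue resources with the requested ring sizes.
 * Failure to create a tx queue is fatal; if only some rx queues can be
 * created, num_rx_queues is reduced to the number that succeeded.
 */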
3285int
3286vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3287		      u32 rx_ring_size, u32 rx_ring2_size,
3288		      u16 txdata_desc_size, u16 rxdata_desc_size)
3289{
3290	int err = 0, i;
3291
3292	for (i = 0; i < adapter->num_tx_queues; i++) {
3293		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
3294		tq->tx_ring.size   = tx_ring_size;
3295		tq->data_ring.size = tx_ring_size;
3296		tq->comp_ring.size = tx_ring_size;
3297		tq->txdata_desc_size = txdata_desc_size;
3298		tq->shared = &adapter->tqd_start[i].ctrl;
3299		tq->stopped = true;
3300		tq->adapter = adapter;
3301		tq->qid = i;
3302		err = vmxnet3_tq_create(tq, adapter);
3303		/*
3304		 * Too late to change num_tx_queues. We cannot make do with
3305		 * fewer tx queues than we asked for.
3306		 */
3307		if (err)
3308			goto queue_err;
3309	}
3310
3311	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3312	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3313	vmxnet3_adjust_rx_ring_size(adapter);
3314
3315	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3316	for (i = 0; i < adapter->num_rx_queues; i++) {
3317		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3318		/* qid and qid2 for rx queues will be assigned later when num
3319		 * of rx queues is finalized after allocating intrs */
3320		rq->shared = &adapter->rqd_start[i].ctrl;
3321		rq->adapter = adapter;
3322		rq->data_ring.desc_size = rxdata_desc_size;
3323		err = vmxnet3_rq_create(rq, adapter);
3324		if (err) {
3325			if (i == 0) {
3326				netdev_err(adapter->netdev,
3327					   "Could not allocate any rx queues. "
3328					   "Aborting.\n");
3329				goto queue_err;
3330			} else {
3331				netdev_info(adapter->netdev,
3332					    "Number of rx queues changed "
3333					    "to: %d.\n", i);
3334				adapter->num_rx_queues = i;
3335				err = 0;
3336				break;
3337			}
3338		}
3339	}
3340
3341	if (!adapter->rxdataring_enabled)
3342		vmxnet3_rq_destroy_all_rxdataring(adapter);
3343
3344	return err;
3345queue_err:
3346	vmxnet3_tq_destroy_all(adapter);
3347	return err;
3348}
3349
3350static int
3351vmxnet3_open(struct net_device *netdev)
3352{
3353	struct vmxnet3_adapter *adapter;
3354	int err, i;
3355
3356	adapter = netdev_priv(netdev);
3357
3358	for (i = 0; i < adapter->num_tx_queues; i++)
3359		spin_lock_init(&adapter->tx_queue[i].tx_lock);
3360
3361	if (VMXNET3_VERSION_GE_3(adapter)) {
3362		unsigned long flags;
3363		u16 txdata_desc_size;
3364
3365		spin_lock_irqsave(&adapter->cmd_lock, flags);
3366		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3367				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3368		txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
3369							 VMXNET3_REG_CMD);
3370		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3371
3372		if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3373		    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3374		    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3375			adapter->txdata_desc_size =
3376				sizeof(struct Vmxnet3_TxDataDesc);
3377		} else {
3378			adapter->txdata_desc_size = txdata_desc_size;
3379		}
3380	} else {
3381		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3382	}
3383
3384	err = vmxnet3_create_queues(adapter,
3385				    adapter->tx_ring_size,
3386				    adapter->rx_ring_size,
3387				    adapter->rx_ring2_size,
3388				    adapter->txdata_desc_size,
3389				    adapter->rxdata_desc_size);
3390	if (err)
3391		goto queue_err;
3392
3393	err = vmxnet3_activate_dev(adapter);
3394	if (err)
3395		goto activate_err;
3396
3397	return 0;
3398
3399activate_err:
3400	vmxnet3_rq_destroy_all(adapter);
3401	vmxnet3_tq_destroy_all(adapter);
3402queue_err:
3403	return err;
3404}
3405
3406
3407static int
3408vmxnet3_close(struct net_device *netdev)
3409{
3410	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3411
3412	/*
3413	 * Reset_work may be in the middle of resetting the device, wait for its
3414	 * completion.
3415	 */
3416	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3417		usleep_range(1000, 2000);
3418
3419	vmxnet3_quiesce_dev(adapter);
3420
3421	vmxnet3_rq_destroy_all(adapter);
3422	vmxnet3_tq_destroy_all(adapter);
3423
3424	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3425
3426
3427	return 0;
3428}
3429
3430
3431void
3432vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3433{
3434	int i;
3435
3436	/*
3437	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3438	 * vmxnet3_close() will deadlock.
3439	 */
3440	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3441
3442	/* we need to enable NAPI, otherwise dev_close will deadlock */
3443	for (i = 0; i < adapter->num_rx_queues; i++)
3444		napi_enable(&adapter->rx_queue[i].napi);
3445	/*
3446	 * Need to clear the quiesce bit to ensure that vmxnet3_close
3447	 * can quiesce the device properly
3448	 */
3449	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3450	dev_close(adapter->netdev);
3451}
3452
3453
3454static int
3455vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3456{
3457	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3458	int err = 0;
3459
3460	netdev->mtu = new_mtu;
3461
3462	/*
3463	 * Reset_work may be in the middle of resetting the device, wait for its
3464	 * completion.
3465	 */
3466	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3467		usleep_range(1000, 2000);
3468
3469	if (netif_running(netdev)) {
3470		vmxnet3_quiesce_dev(adapter);
3471		vmxnet3_reset_dev(adapter);
3472
3473		/* we need to re-create the rx queue based on the new mtu */
3474		vmxnet3_rq_destroy_all(adapter);
3475		vmxnet3_adjust_rx_ring_size(adapter);
3476		err = vmxnet3_rq_create_all(adapter);
3477		if (err) {
3478			netdev_err(netdev,
3479				   "failed to re-create rx queues, "
3480				   "error %d. Closing it.\n", err);
3481			goto out;
3482		}
3483
3484		err = vmxnet3_activate_dev(adapter);
3485		if (err) {
3486			netdev_err(netdev,
3487				   "failed to re-activate, error %d. "
3488				   "Closing it.\n", err);
3489			goto out;
3490		}
3491	}
3492
3493out:
3494	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3495	if (err)
3496		vmxnet3_force_close(adapter);
3497
3498	return err;
3499}
3500
3501
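/* Advertise the offloads the device supports: checksum, TSO, VLAN and LRO,
 * plus UDP tunnel offloads for version 4 and later. For version 7 and later
 * the tunnel offloads are kept only if the corresponding passthrough
 * capabilities are granted by the device.
 */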
3502static void
3503vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3504{
3505	struct net_device *netdev = adapter->netdev;
3506
3507	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3508		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3509		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3510		NETIF_F_LRO | NETIF_F_HIGHDMA;
3511
3512	if (VMXNET3_VERSION_GE_4(adapter)) {
3513		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3514				NETIF_F_GSO_UDP_TUNNEL_CSUM;
3515
3516		netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3517			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3518			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3519			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3520			NETIF_F_GSO_UDP_TUNNEL_CSUM;
3521	}
3522
3523	if (VMXNET3_VERSION_GE_7(adapter)) {
3524		unsigned long flags;
3525
3526		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3527					       VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
3528			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3529		}
3530		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3531					       VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
3532			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3533		}
3534		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3535					       VMXNET3_CAP_GENEVE_TSO)) {
3536			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3537		}
3538		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3539					       VMXNET3_CAP_VXLAN_TSO)) {
3540			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3541		}
3542		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3543					       VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
3544			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3545		}
3546		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3547					       VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
3548			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3549		}
3550
3551		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3552		spin_lock_irqsave(&adapter->cmd_lock, flags);
3553		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3554		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3555		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3556
3557		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3558		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3559		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3560		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3561			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3562			netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3563		}
3564		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3565		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3566			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3567			netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3568		}
3569	}
3570
3571	netdev->vlan_features = netdev->hw_features &
3572				~(NETIF_F_HW_VLAN_CTAG_TX |
3573				  NETIF_F_HW_VLAN_CTAG_RX);
3574	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3575}
3576
3577
3578static void
3579vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3580{
3581	u32 tmp;
3582
3583	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3584	*(u32 *)mac = tmp;
3585
3586	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3587	mac[4] = tmp & 0xff;
3588	mac[5] = (tmp >> 8) & 0xff;
3589}
3590
3591#ifdef CONFIG_PCI_MSI
3592
3593/*
3594 * Enable MSIx vectors.
3595 * Returns :
3596 *	VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
3597 *	 were enabled.
3598 *	number of vectors which were enabled otherwise (this number is greater
3599 *	 than VMXNET3_LINUX_MIN_MSIX_VECT)
3600 */
3601
3602static int
3603vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3604{
3605	int ret = pci_enable_msix_range(adapter->pdev,
3606					adapter->intr.msix_entries, nvec, nvec);
3607
3608	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3609		dev_err(&adapter->netdev->dev,
3610			"Failed to enable %d MSI-X, trying %d\n",
3611			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3612
3613		ret = pci_enable_msix_range(adapter->pdev,
3614					    adapter->intr.msix_entries,
3615					    VMXNET3_LINUX_MIN_MSIX_VECT,
3616					    VMXNET3_LINUX_MIN_MSIX_VECT);
3617	}
3618
3619	if (ret < 0) {
3620		dev_err(&adapter->netdev->dev,
3621			"Failed to enable MSI-X, error: %d\n", ret);
3622	}
3623
3624	return ret;
3625}
3626
3627
3628#endif /* CONFIG_PCI_MSI */
3629
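/* Query the preferred interrupt type and mask mode from the device, then
 * try MSI-X (one vector per queue plus one for events), falling back to
 * MSI and finally INTx with a single rx queue.
 */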
3630static void
3631vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3632{
3633	u32 cfg;
3634	unsigned long flags;
3635
3636	/* intr settings */
3637	spin_lock_irqsave(&adapter->cmd_lock, flags);
3638	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3639			       VMXNET3_CMD_GET_CONF_INTR);
3640	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3641	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3642	adapter->intr.type = cfg & 0x3;
3643	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3644
3645	if (adapter->intr.type == VMXNET3_IT_AUTO) {
3646		adapter->intr.type = VMXNET3_IT_MSIX;
3647	}
3648
3649#ifdef CONFIG_PCI_MSI
3650	if (adapter->intr.type == VMXNET3_IT_MSIX) {
3651		int i, nvec, nvec_allocated;
3652
3653		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3654			1 : adapter->num_tx_queues;
3655		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3656			0 : adapter->num_rx_queues;
3657		nvec += 1;	/* for link event */
3658		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3659		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3660
3661		for (i = 0; i < nvec; i++)
3662			adapter->intr.msix_entries[i].entry = i;
3663
3664		nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3665		if (nvec_allocated < 0)
3666			goto msix_err;
3667
3668		/* If we cannot allocate one MSIx vector per queue
3669		 * then limit the number of rx queues to 1
3670		 */
3671		if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3672		    nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3673			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3674			    || adapter->num_rx_queues != 1) {
3675				adapter->share_intr = VMXNET3_INTR_TXSHARE;
3676				netdev_err(adapter->netdev,
3677					   "Number of rx queues : 1\n");
3678				adapter->num_rx_queues = 1;
3679			}
3680		}
3681
3682		adapter->intr.num_intrs = nvec_allocated;
3683		return;
3684
3685msix_err:
3686		/* If we cannot allocate MSIx vectors use only one rx queue */
3687		dev_info(&adapter->pdev->dev,
3688			 "Failed to enable MSI-X, error %d. "
3689			 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3690
3691		adapter->intr.type = VMXNET3_IT_MSI;
3692	}
3693
3694	if (adapter->intr.type == VMXNET3_IT_MSI) {
3695		if (!pci_enable_msi(adapter->pdev)) {
3696			adapter->num_rx_queues = 1;
3697			adapter->intr.num_intrs = 1;
3698			return;
3699		}
3700	}
3701#endif /* CONFIG_PCI_MSI */
3702
3703	adapter->num_rx_queues = 1;
3704	dev_info(&adapter->netdev->dev,
3705		 "Using INTx interrupt, #Rx queues: 1.\n");
3706	adapter->intr.type = VMXNET3_IT_INTX;
3707
3708	/* INT-X related setting */
3709	adapter->intr.num_intrs = 1;
3710}
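
/*
 * Editor's worked example (illustrative): with VMXNET3_INTR_DONTSHARE and
 * 4 tx + 4 rx queues, the MSI-X request above is 4 + 4 + 1 (link event) = 9
 * vectors; with VMXNET3_INTR_BUDDYSHARE the rx queues reuse the tx vectors,
 * so the request drops to 4 + 0 + 1 = 5.  If only VMXNET3_LINUX_MIN_MSIX_VECT
 * vectors can be enabled, the driver falls back to a single rx queue, and if
 * MSI-X fails entirely it tries MSI and finally INTx, both of which also
 * force one rx queue.
 */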
3711
3712
3713static void
3714vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3715{
3716	if (adapter->intr.type == VMXNET3_IT_MSIX)
3717		pci_disable_msix(adapter->pdev);
3718	else if (adapter->intr.type == VMXNET3_IT_MSI)
3719		pci_disable_msi(adapter->pdev);
3720	else
3721		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3722}
3723
3724
3725static void
3726vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3727{
3728	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3729	adapter->tx_timeout_count++;
3730
3731	netdev_err(adapter->netdev, "tx hang\n");
3732	schedule_work(&adapter->work);
3733}
3734
3735
3736static void
3737vmxnet3_reset_work(struct work_struct *data)
3738{
3739	struct vmxnet3_adapter *adapter;
3740
3741	adapter = container_of(data, struct vmxnet3_adapter, work);
3742
3743	/* if another thread is resetting the device, no need to proceed */
3744	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3745		return;
3746
3747	/* if the device is closed, we must leave it alone */
3748	rtnl_lock();
3749	if (netif_running(adapter->netdev)) {
3750		netdev_notice(adapter->netdev, "resetting\n");
3751		vmxnet3_quiesce_dev(adapter);
3752		vmxnet3_reset_dev(adapter);
3753		vmxnet3_activate_dev(adapter);
3754	} else {
3755		netdev_info(adapter->netdev, "already closed\n");
3756	}
3757	rtnl_unlock();
3758
3759	netif_wake_queue(adapter->netdev);
3760	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3761}
3762
3763
3764static int
3765vmxnet3_probe_device(struct pci_dev *pdev,
3766		     const struct pci_device_id *id)
3767{
3768	static const struct net_device_ops vmxnet3_netdev_ops = {
3769		.ndo_open = vmxnet3_open,
3770		.ndo_stop = vmxnet3_close,
3771		.ndo_start_xmit = vmxnet3_xmit_frame,
3772		.ndo_set_mac_address = vmxnet3_set_mac_addr,
3773		.ndo_change_mtu = vmxnet3_change_mtu,
3774		.ndo_fix_features = vmxnet3_fix_features,
3775		.ndo_set_features = vmxnet3_set_features,
3776		.ndo_features_check = vmxnet3_features_check,
3777		.ndo_get_stats64 = vmxnet3_get_stats64,
3778		.ndo_tx_timeout = vmxnet3_tx_timeout,
3779		.ndo_set_rx_mode = vmxnet3_set_mc,
3780		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3781		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3782#ifdef CONFIG_NET_POLL_CONTROLLER
3783		.ndo_poll_controller = vmxnet3_netpoll,
3784#endif
3785		.ndo_bpf = vmxnet3_xdp,
3786		.ndo_xdp_xmit = vmxnet3_xdp_xmit,
3787	};
3788	int err;
3789	u32 ver;
3790	struct net_device *netdev;
3791	struct vmxnet3_adapter *adapter;
3792	u8 mac[ETH_ALEN];
3793	int size;
3794	int num_tx_queues;
3795	int num_rx_queues;
3796	int queues;
3797	unsigned long flags;
3798
3799	if (!pci_msi_enabled())
3800		enable_mq = 0;
3801
3802#ifdef VMXNET3_RSS
3803	if (enable_mq)
3804		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3805				    (int)num_online_cpus());
3806	else
3807#endif
3808		num_rx_queues = 1;
3809
3810	if (enable_mq)
3811		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3812				    (int)num_online_cpus());
3813	else
3814		num_tx_queues = 1;
3815
3816	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3817				   max(num_tx_queues, num_rx_queues));
3818	if (!netdev)
3819		return -ENOMEM;
3820
3821	pci_set_drvdata(pdev, netdev);
3822	adapter = netdev_priv(netdev);
3823	adapter->netdev = netdev;
3824	adapter->pdev = pdev;
3825
3826	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3827	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3828	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3829
3830	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3831	if (err) {
3832		dev_err(&pdev->dev, "dma_set_mask failed\n");
3833		goto err_set_mask;
3834	}
3835
3836	spin_lock_init(&adapter->cmd_lock);
3837	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3838					     sizeof(struct vmxnet3_adapter),
3839					     DMA_TO_DEVICE);
3840	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3841		dev_err(&pdev->dev, "Failed to map dma\n");
3842		err = -EFAULT;
3843		goto err_set_mask;
3844	}
3845	adapter->shared = dma_alloc_coherent(
3846				&adapter->pdev->dev,
3847				sizeof(struct Vmxnet3_DriverShared),
3848				&adapter->shared_pa, GFP_KERNEL);
3849	if (!adapter->shared) {
3850		dev_err(&pdev->dev, "Failed to allocate memory\n");
3851		err = -ENOMEM;
3852		goto err_alloc_shared;
3853	}
3854
3855	err = vmxnet3_alloc_pci_resources(adapter);
3856	if (err < 0)
3857		goto err_alloc_pci;
3858
3859	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3860	if (ver & (1 << VMXNET3_REV_7)) {
3861		VMXNET3_WRITE_BAR1_REG(adapter,
3862				       VMXNET3_REG_VRRS,
3863				       1 << VMXNET3_REV_7);
3864		adapter->version = VMXNET3_REV_7 + 1;
3865	} else if (ver & (1 << VMXNET3_REV_6)) {
3866		VMXNET3_WRITE_BAR1_REG(adapter,
3867				       VMXNET3_REG_VRRS,
3868				       1 << VMXNET3_REV_6);
3869		adapter->version = VMXNET3_REV_6 + 1;
3870	} else if (ver & (1 << VMXNET3_REV_5)) {
3871		VMXNET3_WRITE_BAR1_REG(adapter,
3872				       VMXNET3_REG_VRRS,
3873				       1 << VMXNET3_REV_5);
3874		adapter->version = VMXNET3_REV_5 + 1;
3875	} else if (ver & (1 << VMXNET3_REV_4)) {
3876		VMXNET3_WRITE_BAR1_REG(adapter,
3877				       VMXNET3_REG_VRRS,
3878				       1 << VMXNET3_REV_4);
3879		adapter->version = VMXNET3_REV_4 + 1;
3880	} else if (ver & (1 << VMXNET3_REV_3)) {
3881		VMXNET3_WRITE_BAR1_REG(adapter,
3882				       VMXNET3_REG_VRRS,
3883				       1 << VMXNET3_REV_3);
3884		adapter->version = VMXNET3_REV_3 + 1;
3885	} else if (ver & (1 << VMXNET3_REV_2)) {
3886		VMXNET3_WRITE_BAR1_REG(adapter,
3887				       VMXNET3_REG_VRRS,
3888				       1 << VMXNET3_REV_2);
3889		adapter->version = VMXNET3_REV_2 + 1;
3890	} else if (ver & (1 << VMXNET3_REV_1)) {
3891		VMXNET3_WRITE_BAR1_REG(adapter,
3892				       VMXNET3_REG_VRRS,
3893				       1 << VMXNET3_REV_1);
3894		adapter->version = VMXNET3_REV_1 + 1;
3895	} else {
3896		dev_err(&pdev->dev,
3897			"Incompatible h/w version (0x%x) for adapter\n", ver);
3898		err = -EBUSY;
3899		goto err_ver;
3900	}
3901	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
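
	/*
	 * Editor's sketch (illustrative only, not driver code): the ladder
	 * above simply selects the highest revision bit that both sides
	 * support.  Assuming VMXNET3_REV_1..VMXNET3_REV_7 are consecutive
	 * values, an equivalent loop would be:
	 *
	 *	for (rev = VMXNET3_REV_7; rev >= VMXNET3_REV_1; rev--) {
	 *		if (ver & (1 << rev)) {
	 *			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS,
	 *					       1 << rev);
	 *			adapter->version = rev + 1;
	 *			break;
	 *		}
	 *	}
	 */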
3902
3903	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3904	if (ver & 1) {
3905		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3906	} else {
3907		dev_err(&pdev->dev,
3908			"Incompatible upt version (0x%x) for adapter\n", ver);
3909		err = -EBUSY;
3910		goto err_ver;
3911	}
3912
3913	if (VMXNET3_VERSION_GE_7(adapter)) {
3914		adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
3915		adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
3916		if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3917			adapter->dev_caps[0] = adapter->devcap_supported[0] &
3918							(1UL << VMXNET3_CAP_LARGE_BAR);
3919		}
3920		if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
3921		    adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
3922		    adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
3923			adapter->dev_caps[0] |= adapter->devcap_supported[0] &
3924						(1UL << VMXNET3_CAP_OOORX_COMP);
3925		}
3926		if (adapter->dev_caps[0])
3927			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3928
3929		spin_lock_irqsave(&adapter->cmd_lock, flags);
3930		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3931		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3932		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3933	}
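
	/*
	 * Editor's note (illustrative): writing dev_caps[0] to VMXNET3_REG_DCR
	 * above is only a request; the VMXNET3_CMD_GET_DCR0_REG readback
	 * returns the capability set the device actually granted, so
	 * dev_caps[0] is overwritten with that result before it is consulted
	 * below (e.g. for the LARGE_BAR producer-register offsets).
	 */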
3934
3935	if (VMXNET3_VERSION_GE_7(adapter) &&
3936	    adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3937		adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
3938		adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
3939		adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
3940	} else {
3941		adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
3942		adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
3943		adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
3944	}
3945
3946	if (VMXNET3_VERSION_GE_6(adapter)) {
3947		spin_lock_irqsave(&adapter->cmd_lock, flags);
3948		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3949				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3950		queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3951		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3952		if (queues > 0) {
3953			adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3954			adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
3955		} else {
3956			adapter->num_rx_queues = min(num_rx_queues,
3957						     VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3958			adapter->num_tx_queues = min(num_tx_queues,
3959						     VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3960		}
3961		if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3962		    adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3963			adapter->queuesExtEnabled = true;
3964		} else {
3965			adapter->queuesExtEnabled = false;
3966		}
3967	} else {
3968		adapter->queuesExtEnabled = false;
3969		num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3970		num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3971		adapter->num_rx_queues = min(num_rx_queues,
3972					     VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3973		adapter->num_tx_queues = min(num_tx_queues,
3974					     VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3975	}
3976	dev_info(&pdev->dev,
3977		 "# of Tx queues : %d, # of Rx queues : %d\n",
3978		 adapter->num_tx_queues, adapter->num_rx_queues);
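
	/*
	 * Editor's example (illustrative): VMXNET3_CMD_GET_MAX_QUEUES_CONF
	 * packs both limits into one word, the rx maximum in bits 15:8 and the
	 * tx maximum in bits 7:0, as decoded above.  A returned value of
	 * 0x2010 would therefore allow up to 32 rx queues and 16 tx queues
	 * before the min() against the requested counts.
	 */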
3979
3980	adapter->rx_buf_per_pkt = 1;
3981
3982	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3983	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3984	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3985						&adapter->queue_desc_pa,
3986						GFP_KERNEL);
3987
3988	if (!adapter->tqd_start) {
3989		dev_err(&pdev->dev, "Failed to allocate memory\n");
3990		err = -ENOMEM;
3991		goto err_ver;
3992	}
3993	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3994							    adapter->num_tx_queues);
3995
3996	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3997					      sizeof(struct Vmxnet3_PMConf),
3998					      &adapter->pm_conf_pa,
3999					      GFP_KERNEL);
4000	if (adapter->pm_conf == NULL) {
4001		err = -ENOMEM;
4002		goto err_alloc_pm;
4003	}
4004
4005#ifdef VMXNET3_RSS
4006
4007	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
4008					       sizeof(struct UPT1_RSSConf),
4009					       &adapter->rss_conf_pa,
4010					       GFP_KERNEL);
4011	if (adapter->rss_conf == NULL) {
4012		err = -ENOMEM;
4013		goto err_alloc_rss;
4014	}
4015#endif /* VMXNET3_RSS */
4016
4017	if (VMXNET3_VERSION_GE_3(adapter)) {
4018		adapter->coal_conf =
4019			dma_alloc_coherent(&adapter->pdev->dev,
4020					   sizeof(struct Vmxnet3_CoalesceScheme),
4022					   &adapter->coal_conf_pa,
4023					   GFP_KERNEL);
4024		if (!adapter->coal_conf) {
4025			err = -ENOMEM;
4026			goto err_coal_conf;
4027		}
4028		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
4029		adapter->default_coal_mode = true;
4030	}
4031
4032	if (VMXNET3_VERSION_GE_4(adapter)) {
4033		adapter->default_rss_fields = true;
4034		adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
4035	}
4036
4037	SET_NETDEV_DEV(netdev, &pdev->dev);
4038	vmxnet3_declare_features(adapter);
4039	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
4040			       NETDEV_XDP_ACT_NDO_XMIT;
4041
4042	adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
4043		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
4044
4045	if (adapter->num_tx_queues == adapter->num_rx_queues)
4046		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
4047	else
4048		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
4049
4050	vmxnet3_alloc_intr_resources(adapter);
4051
4052#ifdef VMXNET3_RSS
4053	if (adapter->num_rx_queues > 1 &&
4054	    adapter->intr.type == VMXNET3_IT_MSIX) {
4055		adapter->rss = true;
4056		netdev->hw_features |= NETIF_F_RXHASH;
4057		netdev->features |= NETIF_F_RXHASH;
4058		dev_dbg(&pdev->dev, "RSS is enabled.\n");
4059	} else {
4060		adapter->rss = false;
4061	}
4062#endif
4063
4064	vmxnet3_read_mac_addr(adapter, mac);
4065	dev_addr_set(netdev, mac);
4066
4067	netdev->netdev_ops = &vmxnet3_netdev_ops;
4068	vmxnet3_set_ethtool_ops(netdev);
4069	netdev->watchdog_timeo = 5 * HZ;
4070
4071	/* MTU range: 60 - 9190 */
4072	netdev->min_mtu = VMXNET3_MIN_MTU;
4073	if (VMXNET3_VERSION_GE_6(adapter))
4074		netdev->max_mtu = VMXNET3_V6_MAX_MTU;
4075	else
4076		netdev->max_mtu = VMXNET3_MAX_MTU;
4077
4078	INIT_WORK(&adapter->work, vmxnet3_reset_work);
4079	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4080
4081	if (adapter->intr.type == VMXNET3_IT_MSIX) {
4082		int i;
4083		for (i = 0; i < adapter->num_rx_queues; i++) {
4084			netif_napi_add(adapter->netdev,
4085				       &adapter->rx_queue[i].napi,
4086				       vmxnet3_poll_rx_only);
4087		}
4088	} else {
4089		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
4090			       vmxnet3_poll);
4091	}
4092
4093	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4094	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
4095
4096	netif_carrier_off(netdev);
4097	err = register_netdev(netdev);
4098
4099	if (err) {
4100		dev_err(&pdev->dev, "Failed to register adapter\n");
4101		goto err_register;
4102	}
4103
4104	vmxnet3_check_link(adapter, false);
4105	return 0;
4106
4107err_register:
4108	if (VMXNET3_VERSION_GE_3(adapter)) {
4109		dma_free_coherent(&adapter->pdev->dev,
4110				  sizeof(struct Vmxnet3_CoalesceScheme),
4111				  adapter->coal_conf, adapter->coal_conf_pa);
4112	}
4113	vmxnet3_free_intr_resources(adapter);
4114err_coal_conf:
4115#ifdef VMXNET3_RSS
4116	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4117			  adapter->rss_conf, adapter->rss_conf_pa);
4118err_alloc_rss:
4119#endif
4120	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4121			  adapter->pm_conf, adapter->pm_conf_pa);
4122err_alloc_pm:
4123	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4124			  adapter->queue_desc_pa);
4125err_ver:
4126	vmxnet3_free_pci_resources(adapter);
4127err_alloc_pci:
4128	dma_free_coherent(&adapter->pdev->dev,
4129			  sizeof(struct Vmxnet3_DriverShared),
4130			  adapter->shared, adapter->shared_pa);
4131err_alloc_shared:
4132	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4133			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4134err_set_mask:
4135	free_netdev(netdev);
4136	return err;
4137}
4138
4139
4140static void
4141vmxnet3_remove_device(struct pci_dev *pdev)
4142{
4143	struct net_device *netdev = pci_get_drvdata(pdev);
4144	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4145	int size = 0;
4146	int num_rx_queues, rx_queues;
4147	unsigned long flags;
4148
4149#ifdef VMXNET3_RSS
4150	if (enable_mq)
4151		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
4152				    (int)num_online_cpus());
4153	else
4154#endif
4155		num_rx_queues = 1;
4156	if (!VMXNET3_VERSION_GE_6(adapter)) {
4157		num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4158	}
4159	if (VMXNET3_VERSION_GE_6(adapter)) {
4160		spin_lock_irqsave(&adapter->cmd_lock, flags);
4161		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4162				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4163		rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4164		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4165		if (rx_queues > 0)
4166			rx_queues = (rx_queues >> 8) & 0xff;
4167		else
4168			rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4169		num_rx_queues = min(num_rx_queues, rx_queues);
4170	} else {
4171		num_rx_queues = min(num_rx_queues,
4172				    VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4173	}
4174
4175	cancel_work_sync(&adapter->work);
4176
4177	unregister_netdev(netdev);
4178
4179	vmxnet3_free_intr_resources(adapter);
4180	vmxnet3_free_pci_resources(adapter);
4181	if (VMXNET3_VERSION_GE_3(adapter)) {
4182		dma_free_coherent(&adapter->pdev->dev,
4183				  sizeof(struct Vmxnet3_CoalesceScheme),
4184				  adapter->coal_conf, adapter->coal_conf_pa);
4185	}
4186#ifdef VMXNET3_RSS
4187	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4188			  adapter->rss_conf, adapter->rss_conf_pa);
4189#endif
4190	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4191			  adapter->pm_conf, adapter->pm_conf_pa);
4192
4193	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4194	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4195	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4196			  adapter->queue_desc_pa);
4197	dma_free_coherent(&adapter->pdev->dev,
4198			  sizeof(struct Vmxnet3_DriverShared),
4199			  adapter->shared, adapter->shared_pa);
4200	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4201			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4202	free_netdev(netdev);
4203}
4204
4205static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4206{
4207	struct net_device *netdev = pci_get_drvdata(pdev);
4208	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4209	unsigned long flags;
4210
4211	/* Reset_work may be in the middle of resetting the device, wait for its
4212	 * completion.
4213	 */
4214	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4215		usleep_range(1000, 2000);
4216
4217	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4218			     &adapter->state)) {
4219		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4220		return;
4221	}
4222	spin_lock_irqsave(&adapter->cmd_lock, flags);
4223	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4224			       VMXNET3_CMD_QUIESCE_DEV);
4225	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4226	vmxnet3_disable_all_intrs(adapter);
4227
4228	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4229}
4230
4231
4232#ifdef CONFIG_PM
4233
4234static int
4235vmxnet3_suspend(struct device *device)
4236{
4237	struct pci_dev *pdev = to_pci_dev(device);
4238	struct net_device *netdev = pci_get_drvdata(pdev);
4239	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4240	struct Vmxnet3_PMConf *pmConf;
4241	struct ethhdr *ehdr;
4242	struct arphdr *ahdr;
4243	u8 *arpreq;
4244	struct in_device *in_dev;
4245	struct in_ifaddr *ifa;
4246	unsigned long flags;
4247	int i = 0;
4248
4249	if (!netif_running(netdev))
4250		return 0;
4251
4252	for (i = 0; i < adapter->num_rx_queues; i++)
4253		napi_disable(&adapter->rx_queue[i].napi);
4254
4255	vmxnet3_disable_all_intrs(adapter);
4256	vmxnet3_free_irqs(adapter);
4257	vmxnet3_free_intr_resources(adapter);
4258
4259	netif_device_detach(netdev);
4260
4261	/* Create wake-up filters. */
4262	pmConf = adapter->pm_conf;
4263	memset(pmConf, 0, sizeof(*pmConf));
4264
4265	if (adapter->wol & WAKE_UCAST) {
4266		pmConf->filters[i].patternSize = ETH_ALEN;
4267		pmConf->filters[i].maskSize = 1;
4268		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4269		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4270
4271		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4272		i++;
4273	}
4274
4275	if (adapter->wol & WAKE_ARP) {
4276		rcu_read_lock();
4277
4278		in_dev = __in_dev_get_rcu(netdev);
4279		if (!in_dev) {
4280			rcu_read_unlock();
4281			goto skip_arp;
4282		}
4283
4284		ifa = rcu_dereference(in_dev->ifa_list);
4285		if (!ifa) {
4286			rcu_read_unlock();
4287			goto skip_arp;
4288		}
4289
4290		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4291			sizeof(struct arphdr) +		/* ARP header */
4292			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
4293			2 * sizeof(u32);	/*2 IPv4 addresses */
4294		pmConf->filters[i].maskSize =
4295			(pmConf->filters[i].patternSize - 1) / 8 + 1;
4296
4297		/* ETH_P_ARP in Ethernet header. */
4298		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4299		ehdr->h_proto = htons(ETH_P_ARP);
4300
4301		/* ARPOP_REQUEST in ARP header. */
4302		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4303		ahdr->ar_op = htons(ARPOP_REQUEST);
4304		arpreq = (u8 *)(ahdr + 1);
4305
4306		/* The Unicast IPv4 address in 'tip' field. */
4307		arpreq += 2 * ETH_ALEN + sizeof(u32);
4308		*(__be32 *)arpreq = ifa->ifa_address;
4309
4310		rcu_read_unlock();
4311
4312		/* The mask for the relevant bits. */
4313		pmConf->filters[i].mask[0] = 0x00;
4314		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4315		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4316		pmConf->filters[i].mask[3] = 0x00;
4317		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4318		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
4319
4320		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4321		i++;
4322	}
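
	/*
	 * Editor's note on the mask layout (illustrative): bit i of mask[j]
	 * covers pattern byte j * 8 + i, hence maskSize = (patternSize - 1) /
	 * 8 + 1.  In the ARP filter above, mask[1] = 0x30 selects pattern
	 * bytes 12-13 (the EtherType), mask[2] = 0x30 selects bytes 20-21
	 * (arphdr->ar_op) and mask[4] = 0xC0 plus mask[5] = 0x03 select bytes
	 * 38-41 (the target IPv4 address).  The unicast filter's mask[0] =
	 * 0x3F likewise selects pattern bytes 0-5, i.e. the MAC address.
	 */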
4323
4324skip_arp:
4325	if (adapter->wol & WAKE_MAGIC)
4326		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4327
4328	pmConf->numFilters = i;
4329
4330	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4331	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4332								  *pmConf));
4333	adapter->shared->devRead.pmConfDesc.confPA =
4334		cpu_to_le64(adapter->pm_conf_pa);
4335
4336	spin_lock_irqsave(&adapter->cmd_lock, flags);
4337	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4338			       VMXNET3_CMD_UPDATE_PMCFG);
4339	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4340
4341	pci_save_state(pdev);
4342	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4343			adapter->wol);
4344	pci_disable_device(pdev);
4345	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
4346
4347	return 0;
4348}
4349
4350
4351static int
4352vmxnet3_resume(struct device *device)
4353{
4354	int err;
4355	unsigned long flags;
4356	struct pci_dev *pdev = to_pci_dev(device);
4357	struct net_device *netdev = pci_get_drvdata(pdev);
4358	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4359
4360	if (!netif_running(netdev))
4361		return 0;
4362
4363	pci_set_power_state(pdev, PCI_D0);
4364	pci_restore_state(pdev);
4365	err = pci_enable_device_mem(pdev);
4366	if (err != 0)
4367		return err;
4368
4369	pci_enable_wake(pdev, PCI_D0, 0);
4370
4371	vmxnet3_alloc_intr_resources(adapter);
4372
4373	/* During hibernate and suspend, the device has to be reinitialized as the
4374	 * device state may not have been preserved.
4375	 */
4376
4377	/* Need not check adapter state as other reset tasks cannot run during
4378	 * device resume.
4379	 */
4380	spin_lock_irqsave(&adapter->cmd_lock, flags);
4381	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4382			       VMXNET3_CMD_QUIESCE_DEV);
4383	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4384	vmxnet3_tq_cleanup_all(adapter);
4385	vmxnet3_rq_cleanup_all(adapter);
4386
4387	vmxnet3_reset_dev(adapter);
4388	err = vmxnet3_activate_dev(adapter);
4389	if (err != 0) {
4390		netdev_err(netdev,
4391			   "failed to re-activate on resume, error: %d", err);
4392		vmxnet3_force_close(adapter);
4393		return err;
4394	}
4395	netif_device_attach(netdev);
4396
4397	return 0;
4398}
4399
4400static const struct dev_pm_ops vmxnet3_pm_ops = {
4401	.suspend = vmxnet3_suspend,
4402	.resume = vmxnet3_resume,
4403	.freeze = vmxnet3_suspend,
4404	.restore = vmxnet3_resume,
4405};
4406#endif
4407
4408static struct pci_driver vmxnet3_driver = {
4409	.name		= vmxnet3_driver_name,
4410	.id_table	= vmxnet3_pciid_table,
4411	.probe		= vmxnet3_probe_device,
4412	.remove		= vmxnet3_remove_device,
4413	.shutdown	= vmxnet3_shutdown_device,
4414#ifdef CONFIG_PM
4415	.driver.pm	= &vmxnet3_pm_ops,
4416#endif
4417};
4418
4419
4420static int __init
4421vmxnet3_init_module(void)
4422{
4423	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4424		VMXNET3_DRIVER_VERSION_REPORT);
4425	return pci_register_driver(&vmxnet3_driver);
4426}
4427
4428module_init(vmxnet3_init_module);
4429
4430
4431static void
4432vmxnet3_exit_module(void)
4433{
4434	pci_unregister_driver(&vmxnet3_driver);
4435}
4436
4437module_exit(vmxnet3_exit_module);
4438
4439MODULE_AUTHOR("VMware, Inc.");
4440MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4441MODULE_LICENSE("GPL v2");
4442MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
v5.4
   1/*
   2 * Linux driver for VMware's vmxnet3 ethernet NIC.
   3 *
   4 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License as published by the
   8 * Free Software Foundation; version 2 of the License and no later version.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13 * NON INFRINGEMENT. See the GNU General Public License for more
  14 * details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19 *
  20 * The full GNU General Public License is included in this distribution in
  21 * the file called "COPYING".
  22 *
  23 * Maintained by: pv-drivers@vmware.com
  24 *
  25 */
  26
  27#include <linux/module.h>
  28#include <net/ip6_checksum.h>
  29
  30#include "vmxnet3_int.h"
  31
  32char vmxnet3_driver_name[] = "vmxnet3";
  33#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
  34
  35/*
  36 * PCI Device ID Table
  37 * Last entry must be all 0s
  38 */
  39static const struct pci_device_id vmxnet3_pciid_table[] = {
  40	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
  41	{0}
  42};
  43
  44MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
  45
  46static int enable_mq = 1;
  47
  48static void
  49vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
  50
  51/*
  52 *    Enable/Disable the given intr
  53 */
  54static void
  55vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  56{
  57	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
  58}
  59
  60
  61static void
  62vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  63{
  64	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
  65}
  66
  67
  68/*
  69 *    Enable/Disable all intrs used by the device
  70 */
  71static void
  72vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
  73{
  74	int i;
  75
  76	for (i = 0; i < adapter->intr.num_intrs; i++)
  77		vmxnet3_enable_intr(adapter, i);
  78	adapter->shared->devRead.intrConf.intrCtrl &=
  79					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
  80}
  81
  82
  83static void
  84vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
  85{
  86	int i;
  87
  88	adapter->shared->devRead.intrConf.intrCtrl |=
  89					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
  90	for (i = 0; i < adapter->intr.num_intrs; i++)
  91		vmxnet3_disable_intr(adapter, i);
  92}
  93
  94
  95static void
  96vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
  97{
  98	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
  99}
 100
 101
 102static bool
 103vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 104{
 105	return tq->stopped;
 106}
 107
 108
 109static void
 110vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 111{
 112	tq->stopped = false;
 113	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
 114}
 115
 116
 117static void
 118vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 119{
 120	tq->stopped = false;
 121	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 122}
 123
 124
 125static void
 126vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 127{
 128	tq->stopped = true;
 129	tq->num_stop++;
 130	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 131}
 132
 133
 134/*
 135 * Check the link state. This may start or stop the tx queue.
 136 */
 137static void
 138vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 139{
 140	u32 ret;
 141	int i;
 142	unsigned long flags;
 143
 144	spin_lock_irqsave(&adapter->cmd_lock, flags);
 145	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 146	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
 147	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 148
 149	adapter->link_speed = ret >> 16;
 150	if (ret & 1) { /* Link is up. */
 151		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
 152			    adapter->link_speed);
 153		netif_carrier_on(adapter->netdev);
 154
 155		if (affectTxQueue) {
 156			for (i = 0; i < adapter->num_tx_queues; i++)
 157				vmxnet3_tq_start(&adapter->tx_queue[i],
 158						 adapter);
 159		}
 160	} else {
 161		netdev_info(adapter->netdev, "NIC Link is Down\n");
 162		netif_carrier_off(adapter->netdev);
 163
 164		if (affectTxQueue) {
 165			for (i = 0; i < adapter->num_tx_queues; i++)
 166				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
 167		}
 168	}
 169}
 170
 171static void
 172vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 173{
 174	int i;
 175	unsigned long flags;
 176	u32 events = le32_to_cpu(adapter->shared->ecr);
 177	if (!events)
 178		return;
 179
 180	vmxnet3_ack_events(adapter, events);
 181
 182	/* Check if link state has changed */
 183	if (events & VMXNET3_ECR_LINK)
 184		vmxnet3_check_link(adapter, true);
 185
 186	/* Check if there is an error on xmit/recv queues */
 187	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
 188		spin_lock_irqsave(&adapter->cmd_lock, flags);
 189		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 190				       VMXNET3_CMD_GET_QUEUE_STATUS);
 191		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 192
 193		for (i = 0; i < adapter->num_tx_queues; i++)
 194			if (adapter->tqd_start[i].status.stopped)
 195				dev_err(&adapter->netdev->dev,
 196					"%s: tq[%d] error 0x%x\n",
 197					adapter->netdev->name, i, le32_to_cpu(
 198					adapter->tqd_start[i].status.error));
 199		for (i = 0; i < adapter->num_rx_queues; i++)
 200			if (adapter->rqd_start[i].status.stopped)
 201				dev_err(&adapter->netdev->dev,
 202					"%s: rq[%d] error 0x%x\n",
 203					adapter->netdev->name, i,
 204					adapter->rqd_start[i].status.error);
 205
 206		schedule_work(&adapter->work);
 207	}
 208}
 209
 210#ifdef __BIG_ENDIAN_BITFIELD
 211/*
 212 * The device expects the bitfields in shared structures to be written in
 213 * little endian. When the CPU is big endian, the following routines are used
 214 * to read from and write to the ABI correctly.
 215 * The general technique used here is: double word bitfields are defined in
 216 * the opposite order for big endian architectures. Then, before the driver
 217 * reads them, the complete double word is translated using le32_to_cpu.
 218 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
 219 * to translate the double words into the required format.
 220 * To avoid touching bits in the shared structures more than once, temporary
 221 * descriptors are used. These are passed as srcDesc to the following functions.
 222 */
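
/*
 * Editor's illustrative example: with this scheme a field such as an rx
 * descriptor's gen bit is never addressed directly on a big endian CPU.
 * The containing 32-bit word is byte-swapped first, e.g. (assuming the gen
 * bit sits at VMXNET3_RXD_GEN_SHIFT within the third dword, as the rx-fill
 * code later in this file implies):
 *
 *	u32 dword = le32_to_cpu(((const __le32 *)rxd)[2]);
 *	gen = (dword >> VMXNET3_RXD_GEN_SHIFT) & 1;
 *
 * which is what get_bitfield32() below does generically for any (pos, size).
 */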
 223static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
 224				struct Vmxnet3_RxDesc *dstDesc)
 225{
 226	u32 *src = (u32 *)srcDesc + 2;
 227	u32 *dst = (u32 *)dstDesc + 2;
 228	dstDesc->addr = le64_to_cpu(srcDesc->addr);
 229	*dst = le32_to_cpu(*src);
 230	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
 231}
 232
 233static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
 234			       struct Vmxnet3_TxDesc *dstDesc)
 235{
 236	int i;
 237	u32 *src = (u32 *)(srcDesc + 1);
 238	u32 *dst = (u32 *)(dstDesc + 1);
 239
 240	/* Working backwards so that the gen bit is set at the end. */
 241	for (i = 2; i > 0; i--) {
 242		src--;
 243		dst--;
 244		*dst = cpu_to_le32(*src);
 245	}
 246}
 247
 248
 249static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
 250				struct Vmxnet3_RxCompDesc *dstDesc)
 251{
 252	int i = 0;
 253	u32 *src = (u32 *)srcDesc;
 254	u32 *dst = (u32 *)dstDesc;
 255	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
 256		*dst = le32_to_cpu(*src);
 257		src++;
 258		dst++;
 259	}
 260}
 261
 262
 263/* Used to read bitfield values from double words. */
 264static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
 265{
 266	u32 temp = le32_to_cpu(*bitfield);
 267	u32 mask = ((1 << size) - 1) << pos;
 268	temp &= mask;
 269	temp >>= pos;
 270	return temp;
 271}
 272
 273
 274
 275#endif  /* __BIG_ENDIAN_BITFIELD */
 276
 277#ifdef __BIG_ENDIAN_BITFIELD
 278
 279#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
 280			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
 281			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
 282#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
 283			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
 284			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
 285#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
 286			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
 287			VMXNET3_TCD_GEN_SIZE)
 288#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
 289			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
 290#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
 291			(dstrcd) = (tmp); \
 292			vmxnet3_RxCompToCPU((rcd), (tmp)); \
 293		} while (0)
 294#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
 295			(dstrxd) = (tmp); \
 296			vmxnet3_RxDescToCPU((rxd), (tmp)); \
 297		} while (0)
 298
 299#else
 300
 301#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
 302#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
 303#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
 304#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
 305#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
 306#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
 307
 308#endif /* __BIG_ENDIAN_BITFIELD  */
 309
 310
 311static void
 312vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
 313		     struct pci_dev *pdev)
 314{
 315	if (tbi->map_type == VMXNET3_MAP_SINGLE)
 316		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
 317				 PCI_DMA_TODEVICE);
 318	else if (tbi->map_type == VMXNET3_MAP_PAGE)
 319		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
 320			       PCI_DMA_TODEVICE);
 321	else
 322		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
 323
 324	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
 325}
 326
 327
 328static int
 329vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
 330		  struct pci_dev *pdev,	struct vmxnet3_adapter *adapter)
 331{
 332	struct sk_buff *skb;
 333	int entries = 0;
 334
 335	/* no out of order completion */
 336	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
 337	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
 338
 339	skb = tq->buf_info[eop_idx].skb;
 340	BUG_ON(skb == NULL);
 341	tq->buf_info[eop_idx].skb = NULL;
 342
 343	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
 344
 345	while (tq->tx_ring.next2comp != eop_idx) {
 346		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
 347				     pdev);
 348
 349		/* update next2comp w/o tx_lock. Since we only ever mark more
 350		 * tx ring entries available (never fewer), the worst case is
 351		 * that the tx routine sees a stale count and incorrectly
 352		 * re-queues a pkt due to insufficient tx ring entries.
 353		 */
 354		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
 355		entries++;
 356	}
 357
 358	dev_kfree_skb_any(skb);
 359	return entries;
 360}
 361
 362
 363static int
 364vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
 365			struct vmxnet3_adapter *adapter)
 366{
 367	int completed = 0;
 368	union Vmxnet3_GenericDesc *gdesc;
 369
 370	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
 371	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
 372		/* Prevent any &gdesc->tcd field from being (speculatively)
 373		 * read before (&gdesc->tcd)->gen is read.
 374		 */
 375		dma_rmb();
 376
 377		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
 378					       &gdesc->tcd), tq, adapter->pdev,
 379					       adapter);
 380
 381		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
 382		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
 383	}
 384
 385	if (completed) {
 386		spin_lock(&tq->tx_lock);
 387		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
 388			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
 389			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
 390			     netif_carrier_ok(adapter->netdev))) {
 391			vmxnet3_tq_wake(tq, adapter);
 392		}
 393		spin_unlock(&tq->tx_lock);
 394	}
 395	return completed;
 396}
 397
 398
 399static void
 400vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
 401		   struct vmxnet3_adapter *adapter)
 402{
 403	int i;
 404
 405	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
 406		struct vmxnet3_tx_buf_info *tbi;
 407
 408		tbi = tq->buf_info + tq->tx_ring.next2comp;
 409
 410		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
 411		if (tbi->skb) {
 412			dev_kfree_skb_any(tbi->skb);
 413			tbi->skb = NULL;
 414		}
 415		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
 416	}
 417
 418	/* sanity check, verify all buffers are indeed unmapped and freed */
 419	for (i = 0; i < tq->tx_ring.size; i++) {
 420		BUG_ON(tq->buf_info[i].skb != NULL ||
 421		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
 422	}
 423
 424	tq->tx_ring.gen = VMXNET3_INIT_GEN;
 425	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
 426
 427	tq->comp_ring.gen = VMXNET3_INIT_GEN;
 428	tq->comp_ring.next2proc = 0;
 429}
 430
 431
 432static void
 433vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 434		   struct vmxnet3_adapter *adapter)
 435{
 436	if (tq->tx_ring.base) {
 437		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
 438				  sizeof(struct Vmxnet3_TxDesc),
 439				  tq->tx_ring.base, tq->tx_ring.basePA);
 440		tq->tx_ring.base = NULL;
 441	}
 442	if (tq->data_ring.base) {
 443		dma_free_coherent(&adapter->pdev->dev,
 444				  tq->data_ring.size * tq->txdata_desc_size,
 445				  tq->data_ring.base, tq->data_ring.basePA);
 446		tq->data_ring.base = NULL;
 447	}
 448	if (tq->comp_ring.base) {
 449		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
 450				  sizeof(struct Vmxnet3_TxCompDesc),
 451				  tq->comp_ring.base, tq->comp_ring.basePA);
 452		tq->comp_ring.base = NULL;
 453	}
 454	if (tq->buf_info) {
 455		dma_free_coherent(&adapter->pdev->dev,
 456				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
 457				  tq->buf_info, tq->buf_info_pa);
 458		tq->buf_info = NULL;
 459	}
 460}
 461
 462
 463/* Destroy all tx queues */
 464void
 465vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
 466{
 467	int i;
 468
 469	for (i = 0; i < adapter->num_tx_queues; i++)
 470		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
 471}
 472
 473
 474static void
 475vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
 476		struct vmxnet3_adapter *adapter)
 477{
 478	int i;
 479
 480	/* reset the tx ring contents to 0 and reset the tx ring states */
 481	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
 482	       sizeof(struct Vmxnet3_TxDesc));
 483	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
 484	tq->tx_ring.gen = VMXNET3_INIT_GEN;
 485
 486	memset(tq->data_ring.base, 0,
 487	       tq->data_ring.size * tq->txdata_desc_size);
 488
 489	/* reset the tx comp ring contents to 0 and reset comp ring states */
 490	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
 491	       sizeof(struct Vmxnet3_TxCompDesc));
 492	tq->comp_ring.next2proc = 0;
 493	tq->comp_ring.gen = VMXNET3_INIT_GEN;
 494
 495	/* reset the bookkeeping data */
 496	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
 497	for (i = 0; i < tq->tx_ring.size; i++)
 498		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
 499
 500	/* stats are not reset */
 501}
 502
 503
 504static int
 505vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
 506		  struct vmxnet3_adapter *adapter)
 507{
 508	size_t sz;
 509
 510	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
 511	       tq->comp_ring.base || tq->buf_info);
 512
 513	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
 514			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
 515			&tq->tx_ring.basePA, GFP_KERNEL);
 516	if (!tq->tx_ring.base) {
 517		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
 518		goto err;
 519	}
 520
 521	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
 522			tq->data_ring.size * tq->txdata_desc_size,
 523			&tq->data_ring.basePA, GFP_KERNEL);
 524	if (!tq->data_ring.base) {
 525		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
 526		goto err;
 527	}
 528
 529	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
 530			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
 531			&tq->comp_ring.basePA, GFP_KERNEL);
 532	if (!tq->comp_ring.base) {
 533		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
 534		goto err;
 535	}
 536
 537	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
 538	tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
 539					  &tq->buf_info_pa, GFP_KERNEL);
 540	if (!tq->buf_info)
 541		goto err;
 542
 543	return 0;
 544
 545err:
 546	vmxnet3_tq_destroy(tq, adapter);
 547	return -ENOMEM;
 548}
 549
 550static void
 551vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
 552{
 553	int i;
 554
 555	for (i = 0; i < adapter->num_tx_queues; i++)
 556		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
 557}
 558
 559/*
 560 *    starting from ring->next2fill, allocate rx buffers for the given ring
 561 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 562 *    are allocated or allocation fails
 563 */
 564
 565static int
 566vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 567			int num_to_alloc, struct vmxnet3_adapter *adapter)
 568{
 569	int num_allocated = 0;
 570	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
 571	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
 572	u32 val;
 573
 574	while (num_allocated <= num_to_alloc) {
 575		struct vmxnet3_rx_buf_info *rbi;
 576		union Vmxnet3_GenericDesc *gd;
 577
 578		rbi = rbi_base + ring->next2fill;
 579		gd = ring->base + ring->next2fill;
 580
 581		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
 582			if (rbi->skb == NULL) {
 583				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
 584								       rbi->len,
 585								       GFP_KERNEL);
 586				if (unlikely(rbi->skb == NULL)) {
 587					rq->stats.rx_buf_alloc_failure++;
 588					break;
 589				}
 590
 591				rbi->dma_addr = dma_map_single(
 592						&adapter->pdev->dev,
 593						rbi->skb->data, rbi->len,
 594						PCI_DMA_FROMDEVICE);
 595				if (dma_mapping_error(&adapter->pdev->dev,
 596						      rbi->dma_addr)) {
 597					dev_kfree_skb_any(rbi->skb);
 598					rq->stats.rx_buf_alloc_failure++;
 599					break;
 600				}
 601			} else {
 602				/* rx buffer skipped by the device */
 603			}
 604			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
 605		} else {
 606			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
 607			       rbi->len  != PAGE_SIZE);
 608
 609			if (rbi->page == NULL) {
 610				rbi->page = alloc_page(GFP_ATOMIC);
 611				if (unlikely(rbi->page == NULL)) {
 612					rq->stats.rx_buf_alloc_failure++;
 613					break;
 614				}
 615				rbi->dma_addr = dma_map_page(
 616						&adapter->pdev->dev,
 617						rbi->page, 0, PAGE_SIZE,
 618						PCI_DMA_FROMDEVICE);
 619				if (dma_mapping_error(&adapter->pdev->dev,
 620						      rbi->dma_addr)) {
 621					put_page(rbi->page);
 622					rq->stats.rx_buf_alloc_failure++;
 623					break;
 624				}
 625			} else {
 626				/* rx buffers skipped by the device */
 627			}
 628			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
 629		}
 630
 631		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
 632		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 633					   | val | rbi->len);
 634
 635		/* Fill the last buffer but don't mark it ready, or else the
 636		 * device will think that the queue is full */
 637		if (num_allocated == num_to_alloc)
 638			break;
 639
 640		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
 641		num_allocated++;
 642		vmxnet3_cmd_ring_adv_next2fill(ring);
 643	}
 644
 645	netdev_dbg(adapter->netdev,
 646		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
 647		num_allocated, ring->next2fill, ring->next2comp);
 648
 649	/* so that the device can distinguish a full ring and an empty ring */
 650	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
 651
 652	return num_allocated;
 653}
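
/*
 * Editor's note (illustrative): the ring is deliberately never marked
 * completely full.  next2fill == next2comp is reserved to mean "empty", so
 * the loop above leaves the final buffer's gen bit stale instead of handing
 * it to the device; in effect at most ring->size - 1 descriptors are ever
 * ready at once, which is what the BUG_ON above checks.
 */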
 654
 655
 656static void
 657vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 658		    struct vmxnet3_rx_buf_info *rbi)
 659{
 660	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
 661
 662	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 663
 664	__skb_frag_set_page(frag, rbi->page);
 665	skb_frag_off_set(frag, 0);
 666	skb_frag_size_set(frag, rcd->len);
 667	skb->data_len += rcd->len;
 668	skb->truesize += PAGE_SIZE;
 669	skb_shinfo(skb)->nr_frags++;
 670}
 671
 672
 673static int
 674vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 675		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
 676		struct vmxnet3_adapter *adapter)
 677{
 678	u32 dw2, len;
 679	unsigned long buf_offset;
 680	int i;
 681	union Vmxnet3_GenericDesc *gdesc;
 682	struct vmxnet3_tx_buf_info *tbi = NULL;
 683
 684	BUG_ON(ctx->copy_size > skb_headlen(skb));
 685
 686	/* use the previous gen bit for the SOP desc */
 687	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
 688
 689	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
 690	gdesc = ctx->sop_txd; /* both loops below can be skipped */
 691
 692	/* no need to map the buffer if headers are copied */
 693	if (ctx->copy_size) {
 694		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
 695					tq->tx_ring.next2fill *
 696					tq->txdata_desc_size);
 697		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
 698		ctx->sop_txd->dword[3] = 0;
 699
 700		tbi = tq->buf_info + tq->tx_ring.next2fill;
 701		tbi->map_type = VMXNET3_MAP_NONE;
 702
 703		netdev_dbg(adapter->netdev,
 704			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 705			tq->tx_ring.next2fill,
 706			le64_to_cpu(ctx->sop_txd->txd.addr),
 707			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 708		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 709
 710		/* use the right gen for non-SOP desc */
 711		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 712	}
 713
 714	/* linear part can use multiple tx desc if it's big */
 715	len = skb_headlen(skb) - ctx->copy_size;
 716	buf_offset = ctx->copy_size;
 717	while (len) {
 718		u32 buf_size;
 719
 720		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
 721			buf_size = len;
 722			dw2 |= len;
 723		} else {
 724			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
 725			/* spec says that for TxDesc.len, 0 == 2^14 */
 726		}
 727
 728		tbi = tq->buf_info + tq->tx_ring.next2fill;
 729		tbi->map_type = VMXNET3_MAP_SINGLE;
 730		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 731				skb->data + buf_offset, buf_size,
 732				PCI_DMA_TODEVICE);
 733		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
 734			return -EFAULT;
 735
 736		tbi->len = buf_size;
 737
 738		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 739		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 740
 741		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
 742		gdesc->dword[2] = cpu_to_le32(dw2);
 743		gdesc->dword[3] = 0;
 744
 745		netdev_dbg(adapter->netdev,
 746			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 747			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
 748			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 749		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 750		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 751
 752		len -= buf_size;
 753		buf_offset += buf_size;
 754	}
 755
 756	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 757		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 758		u32 buf_size;
 759
 760		buf_offset = 0;
 761		len = skb_frag_size(frag);
 762		while (len) {
 763			tbi = tq->buf_info + tq->tx_ring.next2fill;
 764			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
 765				buf_size = len;
 766				dw2 |= len;
 767			} else {
 768				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
 769				/* spec says that for TxDesc.len, 0 == 2^14 */
 770			}
 771			tbi->map_type = VMXNET3_MAP_PAGE;
 772			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
 773							 buf_offset, buf_size,
 774							 DMA_TO_DEVICE);
 775			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
 776				return -EFAULT;
 777
 778			tbi->len = buf_size;
 779
 780			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 781			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 782
 783			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
 784			gdesc->dword[2] = cpu_to_le32(dw2);
 785			gdesc->dword[3] = 0;
 786
 787			netdev_dbg(adapter->netdev,
 788				"txd[%u]: 0x%llx %u %u\n",
 789				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
 790				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 791			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 792			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 793
 794			len -= buf_size;
 795			buf_offset += buf_size;
 796		}
 797	}
 798
 799	ctx->eop_txd = gdesc;
 800
 801	/* set the last buf_info for the pkt */
 802	tbi->skb = skb;
 803	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
 804
 805	return 0;
 806}
 807
 808
 809/* Init all tx queues */
 810static void
 811vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
 812{
 813	int i;
 814
 815	for (i = 0; i < adapter->num_tx_queues; i++)
 816		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
 817}
 818
 819
 820/*
 821 *    parse relevant protocol headers:
 822 *      For a tso pkt, relevant headers are L2/3/4 including options
 823 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 824 *      if it's a TCP/UDP pkt
 825 *
 826 * Returns:
 827 *    -1:  error happens during parsing
 828 *     0:  protocol headers parsed, but too big to be copied
 829 *     1:  protocol headers parsed and copied
 830 *
 831 * Other effects:
 832 *    1. related *ctx fields are updated.
 833 *    2. ctx->copy_size is # of bytes copied
 834 *    3. the portion to be copied is guaranteed to be in the linear part
 835 *
 836 */
 837static int
 838vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 839		  struct vmxnet3_tx_ctx *ctx,
 840		  struct vmxnet3_adapter *adapter)
 841{
 842	u8 protocol = 0;
 843
 844	if (ctx->mss) {	/* TSO */
 845		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
 846		ctx->l4_hdr_size = tcp_hdrlen(skb);
 847		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
 848	} else {
 849		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 850			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 851
 852			if (ctx->ipv4) {
 853				const struct iphdr *iph = ip_hdr(skb);
 854
 855				protocol = iph->protocol;
 856			} else if (ctx->ipv6) {
 857				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 858
 859				protocol = ipv6h->nexthdr;
 860			}
 861
 862			switch (protocol) {
 863			case IPPROTO_TCP:
 864				ctx->l4_hdr_size = tcp_hdrlen(skb);
 865				break;
 866			case IPPROTO_UDP:
 867				ctx->l4_hdr_size = sizeof(struct udphdr);
 868				break;
 869			default:
 870				ctx->l4_hdr_size = 0;
 871				break;
 872			}
 873
 874			ctx->copy_size = min(ctx->eth_ip_hdr_size +
 875					 ctx->l4_hdr_size, skb->len);
 876		} else {
 877			ctx->eth_ip_hdr_size = 0;
 878			ctx->l4_hdr_size = 0;
 879			/* copy as much as allowed */
 880			ctx->copy_size = min_t(unsigned int,
 881					       tq->txdata_desc_size,
 882					       skb_headlen(skb));
 883		}
 884
 885		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
 886			ctx->copy_size = skb->len;
 887
 888		/* make sure headers are accessible directly */
 889		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
 890			goto err;
 891	}
 892
 893	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
 894		tq->stats.oversized_hdr++;
 895		ctx->copy_size = 0;
 896		return 0;
 897	}
 898
 899	return 1;
 900err:
 901	return -1;
 902}
 903
 904/*
 905 *    copy relevant protocol headers to the transmit ring:
 906 *      For a tso pkt, relevant headers are L2/3/4 including options
 907 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 908 *      if it's a TCP/UDP pkt
 909 *
 910 *
 911 *    Note that this requires that vmxnet3_parse_hdr be called first to set
 912 *      the appropriate bits in ctx
 913 */
 914static void
 915vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 916		 struct vmxnet3_tx_ctx *ctx,
 917		 struct vmxnet3_adapter *adapter)
 918{
 919	struct Vmxnet3_TxDataDesc *tdd;
 920
 921	tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
 922					    tq->tx_ring.next2fill *
 923					    tq->txdata_desc_size);
 924
 925	memcpy(tdd->data, skb->data, ctx->copy_size);
 926	netdev_dbg(adapter->netdev,
 927		"copy %u bytes to dataRing[%u]\n",
 928		ctx->copy_size, tq->tx_ring.next2fill);
 929}
 930
 931
 932static void
 933vmxnet3_prepare_tso(struct sk_buff *skb,
 934		    struct vmxnet3_tx_ctx *ctx)
 935{
 936	struct tcphdr *tcph = tcp_hdr(skb);
 937
 938	if (ctx->ipv4) {
 939		struct iphdr *iph = ip_hdr(skb);
 940
 941		iph->check = 0;
 942		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 943						 IPPROTO_TCP, 0);
 944	} else if (ctx->ipv6) {
 945		struct ipv6hdr *iph = ipv6_hdr(skb);
 946
 947		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
 948					       IPPROTO_TCP, 0);
 949	}
 950}
 951
 952static int txd_estimate(const struct sk_buff *skb)
 953{
 954	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
 955	int i;
 956
 957	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 958		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 959
 960		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
 961	}
 962	return count;
 963}
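
/*
 * Editor's worked example (illustrative): VMXNET3_TXD_NEEDED() rounds a
 * length up to the number of VMXNET3_MAX_TX_BUF_SIZE (the 2^14-byte limit
 * noted in vmxnet3_map_pkt() above) descriptors it needs, and the extra
 * "+ 1" leaves room for the copied-header descriptor that vmxnet3_map_pkt()
 * may emit.  Assuming a skb with a 20000-byte linear part and two 3000-byte
 * frags, txd_estimate() returns 2 + 1 + 1 + 1 = 5.
 */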
 964
 965/*
 966 * Transmits a pkt through a given tq
 967 * Returns:
 968 *    NETDEV_TX_OK:      descriptors are set up successfully
 969 *    NETDEV_TX_OK:      an error occurred, the pkt is dropped
 970 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 971 *
 972 * Side-effects:
 973 *    1. tx ring may be changed
 974 *    2. tq stats may be updated accordingly
 975 *    3. shared->txNumDeferred may be updated
 976 */
 977
 978static int
 979vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 980		struct vmxnet3_adapter *adapter, struct net_device *netdev)
 981{
 982	int ret;
 983	u32 count;
 984	int num_pkts;
 985	int tx_num_deferred;
 986	unsigned long flags;
 987	struct vmxnet3_tx_ctx ctx;
 988	union Vmxnet3_GenericDesc *gdesc;
 989#ifdef __BIG_ENDIAN_BITFIELD
 990	/* Use temporary descriptor to avoid touching bits multiple times */
 991	union Vmxnet3_GenericDesc tempTxDesc;
 992#endif
 993
 994	count = txd_estimate(skb);
 995
 996	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
 997	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
 998
 999	ctx.mss = skb_shinfo(skb)->gso_size;
1000	if (ctx.mss) {
1001		if (skb_header_cloned(skb)) {
1002			if (unlikely(pskb_expand_head(skb, 0, 0,
1003						      GFP_ATOMIC) != 0)) {
1004				tq->stats.drop_tso++;
1005				goto drop_pkt;
1006			}
1007			tq->stats.copy_skb_header++;
1008		}
1009		vmxnet3_prepare_tso(skb, &ctx);
1010	} else {
1011		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1012
1013			/* non-tso pkts must not use more than
1014			 * VMXNET3_MAX_TXD_PER_PKT entries
1015			 */
1016			if (skb_linearize(skb) != 0) {
1017				tq->stats.drop_too_many_frags++;
1018				goto drop_pkt;
1019			}
1020			tq->stats.linearized++;
1021
1022			/* recalculate the # of descriptors to use */
1023			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1024		}
1025	}
1026
1027	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1028	if (ret >= 0) {
1029		BUG_ON(ret <= 0 && ctx.copy_size != 0);
1030		/* hdrs parsed, check against other limits */
1031		if (ctx.mss) {
1032			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
1033				     VMXNET3_MAX_TX_BUF_SIZE)) {
1034				tq->stats.drop_oversized_hdr++;
1035				goto drop_pkt;
1036			}
1037		} else {
1038			if (skb->ip_summed == CHECKSUM_PARTIAL) {
1039				if (unlikely(ctx.eth_ip_hdr_size +
1040					     skb->csum_offset >
1041					     VMXNET3_MAX_CSUM_OFFSET)) {
1042					tq->stats.drop_oversized_hdr++;
1043					goto drop_pkt;
1044				}
1045			}
1046		}
1047	} else {
1048		tq->stats.drop_hdr_inspect_err++;
1049		goto drop_pkt;
1050	}
1051
1052	spin_lock_irqsave(&tq->tx_lock, flags);
1053
1054	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1055		tq->stats.tx_ring_full++;
1056		netdev_dbg(adapter->netdev,
1057			"tx queue stopped on %s, next2comp %u"
1058			" next2fill %u\n", adapter->netdev->name,
1059			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1060
1061		vmxnet3_tq_stop(tq, adapter);
1062		spin_unlock_irqrestore(&tq->tx_lock, flags);
1063		return NETDEV_TX_BUSY;
1064	}
1065
1066
1067	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1068
1069	/* fill tx descs related to addr & len */
1070	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1071		goto unlock_drop_pkt;
1072
1073	/* setup the EOP desc */
1074	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1075
1076	/* setup the SOP desc */
1077#ifdef __BIG_ENDIAN_BITFIELD
1078	gdesc = &tempTxDesc;
1079	gdesc->dword[2] = ctx.sop_txd->dword[2];
1080	gdesc->dword[3] = ctx.sop_txd->dword[3];
1081#else
1082	gdesc = ctx.sop_txd;
1083#endif
1084	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1085	if (ctx.mss) {
1086		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1087		gdesc->txd.om = VMXNET3_OM_TSO;
1088		gdesc->txd.msscof = ctx.mss;
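		/* Illustrative (editorial addition): e.g. a 7066-byte TSO skb with
		 * hlen 66 and mss 1400 defers (7066 - 66 + 1399) / 1400 = 5 packets.
		 */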
1089		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1090	} else {
1091		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1092			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1093			gdesc->txd.om = VMXNET3_OM_CSUM;
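			/* Illustrative (editorial addition): for an untagged TCPv4
			 * packet, msscof below is 34 + 16 = 50, i.e. the offset from
			 * the start of the frame at which the device is expected to
			 * insert the computed checksum.
			 */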
1094			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1095					    skb->csum_offset;
1096		} else {
1097			gdesc->txd.om = 0;
1098			gdesc->txd.msscof = 0;
1099		}
1100		num_pkts = 1;
1101	}
1102	le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1103	tx_num_deferred += num_pkts;
1104
1105	if (skb_vlan_tag_present(skb)) {
1106		gdesc->txd.ti = 1;
1107		gdesc->txd.tci = skb_vlan_tag_get(skb);
1108	}
1109
1110	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
1111	 * all other writes to &gdesc->txd.
1112	 */
1113	dma_wmb();
1114
1115	/* finally flips the GEN bit of the SOP desc. */
1116	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1117						  VMXNET3_TXD_GEN);
1118#ifdef __BIG_ENDIAN_BITFIELD
1119	/* Finished updating in bitfields of Tx Desc, so write them in original
1120	 * place.
1121	 */
1122	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1123			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1124	gdesc = ctx.sop_txd;
1125#endif
1126	netdev_dbg(adapter->netdev,
1127		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1128		(u32)(ctx.sop_txd -
1129		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1130		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1131
1132	spin_unlock_irqrestore(&tq->tx_lock, flags);
1133
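	/* Editorial note (not part of the original source): txNumDeferred counts
	 * packets queued since the last doorbell; only when it reaches the
	 * device-supplied txThreshold is TXPROD written, so e.g. a threshold of
	 * 32 batches roughly 32 packets per register write under load.
	 */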
1134	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1135		tq->shared->txNumDeferred = 0;
1136		VMXNET3_WRITE_BAR0_REG(adapter,
1137				       VMXNET3_REG_TXPROD + tq->qid * 8,
1138				       tq->tx_ring.next2fill);
1139	}
1140
1141	return NETDEV_TX_OK;
1142
1143unlock_drop_pkt:
1144	spin_unlock_irqrestore(&tq->tx_lock, flags);
1145drop_pkt:
1146	tq->stats.drop_total++;
1147	dev_kfree_skb_any(skb);
1148	return NETDEV_TX_OK;
1149}
1150
1151
1152static netdev_tx_t
1153vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1154{
1155	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1156
1157	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1158	return vmxnet3_tq_xmit(skb,
1159			       &adapter->tx_queue[skb->queue_mapping],
1160			       adapter, netdev);
1161}
1162
1163
1164static void
1165vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1166		struct sk_buff *skb,
1167		union Vmxnet3_GenericDesc *gdesc)
1168{
1169	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1170		if (gdesc->rcd.v4 &&
1171		    (le32_to_cpu(gdesc->dword[3]) &
1172		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1173			skb->ip_summed = CHECKSUM_UNNECESSARY;
1174			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1175			BUG_ON(gdesc->rcd.frg);
1176		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1177					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
1178			skb->ip_summed = CHECKSUM_UNNECESSARY;
1179			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1180			BUG_ON(gdesc->rcd.frg);
1181		} else {
1182			if (gdesc->rcd.csum) {
1183				skb->csum = htons(gdesc->rcd.csum);
1184				skb->ip_summed = CHECKSUM_PARTIAL;
1185			} else {
1186				skb_checksum_none_assert(skb);
1187			}
1188		}
1189	} else {
1190		skb_checksum_none_assert(skb);
1191	}
1192}
1193
1194
1195static void
1196vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1197		 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
1198{
1199	rq->stats.drop_err++;
1200	if (!rcd->fcs)
1201		rq->stats.drop_fcs++;
1202
1203	rq->stats.drop_total++;
1204
1205	/*
1206	 * We do not unmap and chain the rx buffer to the skb.
1207	 * We basically pretend this buffer is not used and will be recycled
1208	 * by vmxnet3_rq_alloc_rx_buf()
1209	 */
1210
1211	/*
1212	 * ctx->skb may be NULL if this is the first and the only one
1213	 * desc for the pkt
1214	 */
1215	if (ctx->skb)
1216		dev_kfree_skb_irq(ctx->skb);
1217
1218	ctx->skb = NULL;
1219}
1220
1221
1222static u32
1223vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1224		    union Vmxnet3_GenericDesc *gdesc)
1225{
1226	u32 hlen, maplen;
1227	union {
1228		void *ptr;
1229		struct ethhdr *eth;
1230		struct vlan_ethhdr *veth;
1231		struct iphdr *ipv4;
1232		struct ipv6hdr *ipv6;
1233		struct tcphdr *tcp;
1234	} hdr;
1235	BUG_ON(gdesc->rcd.tcp == 0);
1236
1237	maplen = skb_headlen(skb);
1238	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1239		return 0;
1240
1241	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1242	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
1243		hlen = sizeof(struct vlan_ethhdr);
1244	else
1245		hlen = sizeof(struct ethhdr);
1246
1247	hdr.eth = eth_hdr(skb);
1248	if (gdesc->rcd.v4) {
1249		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1250		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1251		hdr.ptr += hlen;
1252		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1253		hlen = hdr.ipv4->ihl << 2;
1254		hdr.ptr += hdr.ipv4->ihl << 2;
1255	} else if (gdesc->rcd.v6) {
1256		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1257		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1258		hdr.ptr += hlen;
1259		/* Use an estimated value, since we also need to handle
1260		 * TSO case.
1261		 */
1262		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1263			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1264		hlen = sizeof(struct ipv6hdr);
1265		hdr.ptr += sizeof(struct ipv6hdr);
1266	} else {
1267		/* Non-IP pkt, don't estimate header length */
1268		return 0;
1269	}
1270
1271	if (hlen + sizeof(struct tcphdr) > maplen)
1272		return 0;
1273
1274	return (hlen + (hdr.tcp->doff << 2));
1275}
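/* Illustrative example (editorial addition): for a plain IPv4/TCP frame with
 * no options this returns 20 + 20 = 40 bytes; the Ethernet header length is
 * dropped when hlen is overwritten above, which appears consistent with
 * skb->len already excluding L2 by the time the caller uses the result.
 */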
1276
1277static int
1278vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1279		       struct vmxnet3_adapter *adapter, int quota)
1280{
1281	static const u32 rxprod_reg[2] = {
1282		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1283	};
1284	u32 num_pkts = 0;
1285	bool skip_page_frags = false;
1286	struct Vmxnet3_RxCompDesc *rcd;
1287	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1288	u16 segCnt = 0, mss = 0;
1289#ifdef __BIG_ENDIAN_BITFIELD
1290	struct Vmxnet3_RxDesc rxCmdDesc;
1291	struct Vmxnet3_RxCompDesc rxComp;
1292#endif
1293	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1294			  &rxComp);
1295	while (rcd->gen == rq->comp_ring.gen) {
1296		struct vmxnet3_rx_buf_info *rbi;
1297		struct sk_buff *skb, *new_skb = NULL;
1298		struct page *new_page = NULL;
1299		dma_addr_t new_dma_addr;
1300		int num_to_alloc;
1301		struct Vmxnet3_RxDesc *rxd;
1302		u32 idx, ring_idx;
1303		struct vmxnet3_cmd_ring	*ring = NULL;
1304		if (num_pkts >= quota) {
1305			/* we may stop even before we see the EOP desc of
1306			 * the current pkt
1307			 */
1308			break;
1309		}
1310
1311		/* Prevent any rcd field from being (speculatively) read before
1312		 * rcd->gen is read.
1313		 */
1314		dma_rmb();
1315
1316		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1317		       rcd->rqID != rq->dataRingQid);
1318		idx = rcd->rxdIdx;
1319		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1320		ring = rq->rx_ring + ring_idx;
1321		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1322				  &rxCmdDesc);
1323		rbi = rq->buf_info[ring_idx] + idx;
1324
1325		BUG_ON(rxd->addr != rbi->dma_addr ||
1326		       rxd->len != rbi->len);
1327
1328		if (unlikely(rcd->eop && rcd->err)) {
1329			vmxnet3_rx_error(rq, rcd, ctx, adapter);
1330			goto rcd_done;
1331		}
1332
1333		if (rcd->sop) { /* first buf of the pkt */
1334			bool rxDataRingUsed;
1335			u16 len;
1336
1337			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1338			       (rcd->rqID != rq->qid &&
1339				rcd->rqID != rq->dataRingQid));
1340
1341			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1342			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1343
1344			if (unlikely(rcd->len == 0)) {
1345				/* Pretend the rx buffer is skipped. */
1346				BUG_ON(!(rcd->sop && rcd->eop));
1347				netdev_dbg(adapter->netdev,
1348					"rxRing[%u][%u] 0 length\n",
1349					ring_idx, idx);
1350				goto rcd_done;
1351			}
1352
1353			skip_page_frags = false;
1354			ctx->skb = rbi->skb;
1355
1356			rxDataRingUsed =
1357				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1358			len = rxDataRingUsed ? rcd->len : rbi->len;
1359			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1360							    len);
1361			if (new_skb == NULL) {
1362				/* Skb allocation failed, do not hand this
1363				 * skb over to the stack. Reuse it. Drop the existing pkt
1364				 */
1365				rq->stats.rx_buf_alloc_failure++;
1366				ctx->skb = NULL;
1367				rq->stats.drop_total++;
1368				skip_page_frags = true;
1369				goto rcd_done;
1370			}
1371
1372			if (rxDataRingUsed) {
1373				size_t sz;
1374
1375				BUG_ON(rcd->len > rq->data_ring.desc_size);
1376
1377				ctx->skb = new_skb;
1378				sz = rcd->rxdIdx * rq->data_ring.desc_size;
1379				memcpy(new_skb->data,
1380				       &rq->data_ring.base[sz], rcd->len);
1381			} else {
1382				ctx->skb = rbi->skb;
1383
1384				new_dma_addr =
1385					dma_map_single(&adapter->pdev->dev,
1386						       new_skb->data, rbi->len,
1387						       PCI_DMA_FROMDEVICE);
1388				if (dma_mapping_error(&adapter->pdev->dev,
1389						      new_dma_addr)) {
1390					dev_kfree_skb(new_skb);
1391					/* Skb allocation failed, do not
1392					 * hand this skb over to the stack. Reuse
1393					 * it. Drop the existing pkt.
1394					 */
1395					rq->stats.rx_buf_alloc_failure++;
1396					ctx->skb = NULL;
1397					rq->stats.drop_total++;
1398					skip_page_frags = true;
1399					goto rcd_done;
1400				}
1401
1402				dma_unmap_single(&adapter->pdev->dev,
1403						 rbi->dma_addr,
1404						 rbi->len,
1405						 PCI_DMA_FROMDEVICE);
1406
1407				/* Immediate refill */
1408				rbi->skb = new_skb;
1409				rbi->dma_addr = new_dma_addr;
1410				rxd->addr = cpu_to_le64(rbi->dma_addr);
1411				rxd->len = rbi->len;
1412			}
1413
1414#ifdef VMXNET3_RSS
1415			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1416			    (adapter->netdev->features & NETIF_F_RXHASH))
1417				skb_set_hash(ctx->skb,
1418					     le32_to_cpu(rcd->rssHash),
1419					     PKT_HASH_TYPE_L3);
1420#endif
1421			skb_put(ctx->skb, rcd->len);
1422
1423			if (VMXNET3_VERSION_GE_2(adapter) &&
1424			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1425				struct Vmxnet3_RxCompDescExt *rcdlro;
1426				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1427
1428				segCnt = rcdlro->segCnt;
1429				WARN_ON_ONCE(segCnt == 0);
1430				mss = rcdlro->mss;
1431				if (unlikely(segCnt <= 1))
1432					segCnt = 0;
1433			} else {
1434				segCnt = 0;
1435			}
1436		} else {
1437			BUG_ON(ctx->skb == NULL && !skip_page_frags);
1438
1439			/* non SOP buffer must be type 1 in most cases */
1440			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1441			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1442
1443			/* If an sop buffer was dropped, skip all
1444			 * following non-sop fragments. They will be reused.
1445			 */
1446			if (skip_page_frags)
1447				goto rcd_done;
1448
1449			if (rcd->len) {
1450				new_page = alloc_page(GFP_ATOMIC);
1451				/* Replacement page frag could not be allocated.
1452				 * Reuse this page. Drop the pkt and free the
1453				 * skb which contained this page as a frag. Skip
1454				 * processing all the following non-sop frags.
1455				 */
1456				if (unlikely(!new_page)) {
1457					rq->stats.rx_buf_alloc_failure++;
1458					dev_kfree_skb(ctx->skb);
1459					ctx->skb = NULL;
1460					skip_page_frags = true;
1461					goto rcd_done;
1462				}
1463				new_dma_addr = dma_map_page(&adapter->pdev->dev,
1464							    new_page,
1465							    0, PAGE_SIZE,
1466							    PCI_DMA_FROMDEVICE);
1467				if (dma_mapping_error(&adapter->pdev->dev,
1468						      new_dma_addr)) {
1469					put_page(new_page);
1470					rq->stats.rx_buf_alloc_failure++;
1471					dev_kfree_skb(ctx->skb);
1472					ctx->skb = NULL;
1473					skip_page_frags = true;
1474					goto rcd_done;
1475				}
1476
1477				dma_unmap_page(&adapter->pdev->dev,
1478					       rbi->dma_addr, rbi->len,
1479					       PCI_DMA_FROMDEVICE);
1480
1481				vmxnet3_append_frag(ctx->skb, rcd, rbi);
1482
1483				/* Immediate refill */
1484				rbi->page = new_page;
1485				rbi->dma_addr = new_dma_addr;
1486				rxd->addr = cpu_to_le64(rbi->dma_addr);
1487				rxd->len = rbi->len;
1488			}
1489		}
1490
1491
1492		skb = ctx->skb;
1493		if (rcd->eop) {
1494			u32 mtu = adapter->netdev->mtu;
1495			skb->len += skb->data_len;
1496
1497			vmxnet3_rx_csum(adapter, skb,
1498					(union Vmxnet3_GenericDesc *)rcd);
1499			skb->protocol = eth_type_trans(skb, adapter->netdev);
1500			if (!rcd->tcp ||
1501			    !(adapter->netdev->features & NETIF_F_LRO))
1502				goto not_lro;
1503
1504			if (segCnt != 0 && mss != 0) {
1505				skb_shinfo(skb)->gso_type = rcd->v4 ?
1506					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1507				skb_shinfo(skb)->gso_size = mss;
1508				skb_shinfo(skb)->gso_segs = segCnt;
1509			} else if (segCnt != 0 || skb->len > mtu) {
1510				u32 hlen;
1511
1512				hlen = vmxnet3_get_hdr_len(adapter, skb,
1513					(union Vmxnet3_GenericDesc *)rcd);
1514				if (hlen == 0)
1515					goto not_lro;
1516
1517				skb_shinfo(skb)->gso_type =
1518					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1519				if (segCnt != 0) {
1520					skb_shinfo(skb)->gso_segs = segCnt;
1521					skb_shinfo(skb)->gso_size =
1522						DIV_ROUND_UP(skb->len -
1523							hlen, segCnt);
1524				} else {
1525					skb_shinfo(skb)->gso_size = mtu - hlen;
1526				}
1527			}
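			/* Illustrative (editorial addition): an aggregated LRO skb of
			 * 29040 bytes with hlen 40 and segCnt 20 reconstructs
			 * gso_size = DIV_ROUND_UP(29000, 20) = 1450.
			 */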
1528not_lro:
1529			if (unlikely(rcd->ts))
1530				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1531
1532			if (adapter->netdev->features & NETIF_F_LRO)
1533				netif_receive_skb(skb);
1534			else
1535				napi_gro_receive(&rq->napi, skb);
1536
1537			ctx->skb = NULL;
1538			num_pkts++;
1539		}
1540
1541rcd_done:
1542		/* device may have skipped some rx descs */
1543		ring->next2comp = idx;
1544		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1545		ring = rq->rx_ring + ring_idx;
1546
1547		/* Ensure that the writes to rxd->gen bits will be observed
1548		 * after all other writes to rxd objects.
1549		 */
1550		dma_wmb();
1551
1552		while (num_to_alloc) {
1553			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1554					  &rxCmdDesc);
1555			BUG_ON(!rxd->addr);
1556
1557			/* Recv desc is ready to be used by the device */
1558			rxd->gen = ring->gen;
1559			vmxnet3_cmd_ring_adv_next2fill(ring);
1560			num_to_alloc--;
1561		}
1562
1563		/* if needed, update the register */
1564		if (unlikely(rq->shared->updateRxProd)) {
1565			VMXNET3_WRITE_BAR0_REG(adapter,
1566					       rxprod_reg[ring_idx] + rq->qid * 8,
1567					       ring->next2fill);
1568		}
1569
1570		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1571		vmxnet3_getRxComp(rcd,
1572				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1573	}
1574
1575	return num_pkts;
1576}
1577
1578
1579static void
1580vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1581		   struct vmxnet3_adapter *adapter)
1582{
1583	u32 i, ring_idx;
1584	struct Vmxnet3_RxDesc *rxd;
1585
1586	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1587		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1588#ifdef __BIG_ENDIAN_BITFIELD
1589			struct Vmxnet3_RxDesc rxDesc;
1590#endif
1591			vmxnet3_getRxDesc(rxd,
1592				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1593
1594			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1595					rq->buf_info[ring_idx][i].skb) {
1596				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1597						 rxd->len, PCI_DMA_FROMDEVICE);
1598				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1599				rq->buf_info[ring_idx][i].skb = NULL;
1600			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1601					rq->buf_info[ring_idx][i].page) {
1602				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1603					       rxd->len, PCI_DMA_FROMDEVICE);
1604				put_page(rq->buf_info[ring_idx][i].page);
1605				rq->buf_info[ring_idx][i].page = NULL;
1606			}
1607		}
1608
1609		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1610		rq->rx_ring[ring_idx].next2fill =
1611					rq->rx_ring[ring_idx].next2comp = 0;
1612	}
1613
1614	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1615	rq->comp_ring.next2proc = 0;
1616}
1617
1618
1619static void
1620vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1621{
1622	int i;
1623
1624	for (i = 0; i < adapter->num_rx_queues; i++)
1625		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1626}
1627
1628
1629static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1630			       struct vmxnet3_adapter *adapter)
1631{
1632	int i;
1633	int j;
1634
1635	/* all rx buffers must have already been freed */
1636	for (i = 0; i < 2; i++) {
1637		if (rq->buf_info[i]) {
1638			for (j = 0; j < rq->rx_ring[i].size; j++)
1639				BUG_ON(rq->buf_info[i][j].page != NULL);
1640		}
1641	}
1642
1643
1644	for (i = 0; i < 2; i++) {
1645		if (rq->rx_ring[i].base) {
1646			dma_free_coherent(&adapter->pdev->dev,
1647					  rq->rx_ring[i].size
1648					  * sizeof(struct Vmxnet3_RxDesc),
1649					  rq->rx_ring[i].base,
1650					  rq->rx_ring[i].basePA);
1651			rq->rx_ring[i].base = NULL;
1652		}
1653	}
1654
1655	if (rq->data_ring.base) {
1656		dma_free_coherent(&adapter->pdev->dev,
1657				  rq->rx_ring[0].size * rq->data_ring.desc_size,
1658				  rq->data_ring.base, rq->data_ring.basePA);
1659		rq->data_ring.base = NULL;
1660	}
1661
1662	if (rq->comp_ring.base) {
1663		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1664				  * sizeof(struct Vmxnet3_RxCompDesc),
1665				  rq->comp_ring.base, rq->comp_ring.basePA);
1666		rq->comp_ring.base = NULL;
1667	}
1668
1669	if (rq->buf_info[0]) {
1670		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
1671			(rq->rx_ring[0].size + rq->rx_ring[1].size);
1672		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
1673				  rq->buf_info_pa);
1674		rq->buf_info[0] = rq->buf_info[1] = NULL;
1675	}
1676}
1677
1678static void
1679vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1680{
1681	int i;
1682
1683	for (i = 0; i < adapter->num_rx_queues; i++) {
1684		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1685
1686		if (rq->data_ring.base) {
1687			dma_free_coherent(&adapter->pdev->dev,
1688					  (rq->rx_ring[0].size *
1689					  rq->data_ring.desc_size),
1690					  rq->data_ring.base,
1691					  rq->data_ring.basePA);
1692			rq->data_ring.base = NULL;
1693			rq->data_ring.desc_size = 0;
1694		}
1695	}
1696}
1697
1698static int
1699vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1700		struct vmxnet3_adapter  *adapter)
1701{
1702	int i;
1703
1704	/* initialize buf_info */
1705	for (i = 0; i < rq->rx_ring[0].size; i++) {
1706
1707		/* 1st buf for a pkt is skbuff */
1708		if (i % adapter->rx_buf_per_pkt == 0) {
1709			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1710			rq->buf_info[0][i].len = adapter->skb_buf_size;
1711		} else { /* subsequent bufs for a pkt are frags */
1712			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1713			rq->buf_info[0][i].len = PAGE_SIZE;
1714		}
1715	}
1716	for (i = 0; i < rq->rx_ring[1].size; i++) {
1717		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1718		rq->buf_info[1][i].len = PAGE_SIZE;
1719	}
1720
1721	/* reset internal state and allocate buffers for both rings */
1722	for (i = 0; i < 2; i++) {
1723		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1724
1725		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1726		       sizeof(struct Vmxnet3_RxDesc));
1727		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1728	}
1729	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1730				    adapter) == 0) {
1731		/* need at least 1 rx buffer for the 1st ring */
1732		return -ENOMEM;
1733	}
1734	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1735
1736	/* reset the comp ring */
1737	rq->comp_ring.next2proc = 0;
1738	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1739	       sizeof(struct Vmxnet3_RxCompDesc));
1740	rq->comp_ring.gen = VMXNET3_INIT_GEN;
1741
1742	/* reset rxctx */
1743	rq->rx_ctx.skb = NULL;
1744
1745	/* stats are not reset */
1746	return 0;
1747}
1748
1749
1750static int
1751vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1752{
1753	int i, err = 0;
1754
1755	for (i = 0; i < adapter->num_rx_queues; i++) {
1756		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1757		if (unlikely(err)) {
1758			dev_err(&adapter->netdev->dev, "%s: failed to "
1759				"initialize rx queue%i\n",
1760				adapter->netdev->name, i);
1761			break;
1762		}
1763	}
1764	return err;
1765
1766}
1767
1768
1769static int
1770vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1771{
1772	int i;
1773	size_t sz;
1774	struct vmxnet3_rx_buf_info *bi;
1775
1776	for (i = 0; i < 2; i++) {
1777
1778		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1779		rq->rx_ring[i].base = dma_alloc_coherent(
1780						&adapter->pdev->dev, sz,
1781						&rq->rx_ring[i].basePA,
1782						GFP_KERNEL);
1783		if (!rq->rx_ring[i].base) {
1784			netdev_err(adapter->netdev,
1785				   "failed to allocate rx ring %d\n", i);
1786			goto err;
1787		}
1788	}
1789
1790	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1791		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1792		rq->data_ring.base =
1793			dma_alloc_coherent(&adapter->pdev->dev, sz,
1794					   &rq->data_ring.basePA,
1795					   GFP_KERNEL);
1796		if (!rq->data_ring.base) {
1797			netdev_err(adapter->netdev,
1798				   "rx data ring will be disabled\n");
1799			adapter->rxdataring_enabled = false;
1800		}
1801	} else {
1802		rq->data_ring.base = NULL;
1803		rq->data_ring.desc_size = 0;
1804	}
1805
1806	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1807	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1808						&rq->comp_ring.basePA,
1809						GFP_KERNEL);
1810	if (!rq->comp_ring.base) {
1811		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
1812		goto err;
1813	}
1814
1815	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1816						   rq->rx_ring[1].size);
1817	bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1818				GFP_KERNEL);
1819	if (!bi)
1820		goto err;
1821
1822	rq->buf_info[0] = bi;
1823	rq->buf_info[1] = bi + rq->rx_ring[0].size;
1824
1825	return 0;
1826
1827err:
1828	vmxnet3_rq_destroy(rq, adapter);
1829	return -ENOMEM;
1830}
1831
1832
1833static int
1834vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1835{
1836	int i, err = 0;
1837
1838	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
1839
1840	for (i = 0; i < adapter->num_rx_queues; i++) {
1841		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1842		if (unlikely(err)) {
1843			dev_err(&adapter->netdev->dev,
1844				"%s: failed to create rx queue%i\n",
1845				adapter->netdev->name, i);
1846			goto err_out;
1847		}
1848	}
1849
1850	if (!adapter->rxdataring_enabled)
1851		vmxnet3_rq_destroy_all_rxdataring(adapter);
1852
1853	return err;
1854err_out:
1855	vmxnet3_rq_destroy_all(adapter);
1856	return err;
1857
1858}
1859
1860/* Multiple queue aware polling function for tx and rx */
1861
1862static int
1863vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1864{
1865	int rcd_done = 0, i;
1866	if (unlikely(adapter->shared->ecr))
1867		vmxnet3_process_events(adapter);
1868	for (i = 0; i < adapter->num_tx_queues; i++)
1869		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1870
1871	for (i = 0; i < adapter->num_rx_queues; i++)
1872		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1873						   adapter, budget);
1874	return rcd_done;
1875}
1876
1877
1878static int
1879vmxnet3_poll(struct napi_struct *napi, int budget)
1880{
1881	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1882					  struct vmxnet3_rx_queue, napi);
1883	int rxd_done;
1884
1885	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1886
1887	if (rxd_done < budget) {
1888		napi_complete_done(napi, rxd_done);
1889		vmxnet3_enable_all_intrs(rx_queue->adapter);
1890	}
1891	return rxd_done;
1892}
1893
1894/*
1895 * NAPI polling function for MSI-X mode with multiple Rx queues
1896 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
1897 */
1898
1899static int
1900vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1901{
1902	struct vmxnet3_rx_queue *rq = container_of(napi,
1903						struct vmxnet3_rx_queue, napi);
1904	struct vmxnet3_adapter *adapter = rq->adapter;
1905	int rxd_done;
1906
1907	/* When sharing interrupt with corresponding tx queue, process
1908	 * tx completions in that queue as well
1909	 */
1910	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1911		struct vmxnet3_tx_queue *tq =
1912				&adapter->tx_queue[rq - adapter->rx_queue];
1913		vmxnet3_tq_tx_complete(tq, adapter);
1914	}
1915
1916	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1917
1918	if (rxd_done < budget) {
1919		napi_complete_done(napi, rxd_done);
1920		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1921	}
1922	return rxd_done;
1923}
1924
1925
1926#ifdef CONFIG_PCI_MSI
1927
1928/*
1929 * Handle completion interrupts on tx queues
1930 * Returns whether or not the intr is handled
1931 */
1932
1933static irqreturn_t
1934vmxnet3_msix_tx(int irq, void *data)
1935{
1936	struct vmxnet3_tx_queue *tq = data;
1937	struct vmxnet3_adapter *adapter = tq->adapter;
1938
1939	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1940		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1941
1942	/* Handle the case where only one irq is allocated for all tx queues */
1943	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1944		int i;
1945		for (i = 0; i < adapter->num_tx_queues; i++) {
1946			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1947			vmxnet3_tq_tx_complete(txq, adapter);
1948		}
1949	} else {
1950		vmxnet3_tq_tx_complete(tq, adapter);
1951	}
1952	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1953
1954	return IRQ_HANDLED;
1955}
1956
1957
1958/*
1959 * Handle completion interrupts on rx queues. Returns whether or not the
1960 * intr is handled
1961 */
1962
1963static irqreturn_t
1964vmxnet3_msix_rx(int irq, void *data)
1965{
1966	struct vmxnet3_rx_queue *rq = data;
1967	struct vmxnet3_adapter *adapter = rq->adapter;
1968
1969	/* disable intr if needed */
1970	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1971		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1972	napi_schedule(&rq->napi);
1973
1974	return IRQ_HANDLED;
1975}
1976
1977/*
1978 *----------------------------------------------------------------------------
1979 *
1980 * vmxnet3_msix_event --
1981 *
1982 *    vmxnet3 msix event intr handler
1983 *
1984 * Result:
1985 *    whether or not the intr is handled
1986 *
1987 *----------------------------------------------------------------------------
1988 */
1989
1990static irqreturn_t
1991vmxnet3_msix_event(int irq, void *data)
1992{
1993	struct net_device *dev = data;
1994	struct vmxnet3_adapter *adapter = netdev_priv(dev);
1995
1996	/* disable intr if needed */
1997	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1998		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1999
2000	if (adapter->shared->ecr)
2001		vmxnet3_process_events(adapter);
2002
2003	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2004
2005	return IRQ_HANDLED;
2006}
2007
2008#endif /* CONFIG_PCI_MSI  */
2009
2010
2011/* Interrupt handler for vmxnet3  */
2012static irqreturn_t
2013vmxnet3_intr(int irq, void *dev_id)
2014{
2015	struct net_device *dev = dev_id;
2016	struct vmxnet3_adapter *adapter = netdev_priv(dev);
2017
2018	if (adapter->intr.type == VMXNET3_IT_INTX) {
2019		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2020		if (unlikely(icr == 0))
2021			/* not ours */
2022			return IRQ_NONE;
2023	}
2024
2025
2026	/* disable intr if needed */
2027	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2028		vmxnet3_disable_all_intrs(adapter);
2029
2030	napi_schedule(&adapter->rx_queue[0].napi);
2031
2032	return IRQ_HANDLED;
2033}
2034
2035#ifdef CONFIG_NET_POLL_CONTROLLER
2036
2037/* netpoll callback. */
2038static void
2039vmxnet3_netpoll(struct net_device *netdev)
2040{
2041	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2042
2043	switch (adapter->intr.type) {
2044#ifdef CONFIG_PCI_MSI
2045	case VMXNET3_IT_MSIX: {
2046		int i;
2047		for (i = 0; i < adapter->num_rx_queues; i++)
2048			vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2049		break;
2050	}
2051#endif
2052	case VMXNET3_IT_MSI:
2053	default:
2054		vmxnet3_intr(0, adapter->netdev);
2055		break;
2056	}
2057
2058}
2059#endif	/* CONFIG_NET_POLL_CONTROLLER */
2060
2061static int
2062vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2063{
2064	struct vmxnet3_intr *intr = &adapter->intr;
2065	int err = 0, i;
2066	int vector = 0;
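	/* Illustrative (editorial addition): with 4 tx and 4 rx queues and no
	 * interrupt sharing, the loops below assign MSI-X vectors 0-3 to tx,
	 * 4-7 to rx and vector 8 to events, giving num_intrs = 9.
	 */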
2067
2068#ifdef CONFIG_PCI_MSI
2069	if (adapter->intr.type == VMXNET3_IT_MSIX) {
2070		for (i = 0; i < adapter->num_tx_queues; i++) {
2071			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2072				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2073					adapter->netdev->name, vector);
2074				err = request_irq(
2075					      intr->msix_entries[vector].vector,
2076					      vmxnet3_msix_tx, 0,
2077					      adapter->tx_queue[i].name,
2078					      &adapter->tx_queue[i]);
2079			} else {
2080				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2081					adapter->netdev->name, vector);
2082			}
2083			if (err) {
2084				dev_err(&adapter->netdev->dev,
2085					"Failed to request irq for MSIX, %s, "
2086					"error %d\n",
2087					adapter->tx_queue[i].name, err);
2088				return err;
2089			}
2090
2091			/* Handle the case where only 1 MSIx was allocated for
2092			 * all tx queues */
2093			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2094				for (; i < adapter->num_tx_queues; i++)
2095					adapter->tx_queue[i].comp_ring.intr_idx
2096								= vector;
2097				vector++;
2098				break;
2099			} else {
2100				adapter->tx_queue[i].comp_ring.intr_idx
2101								= vector++;
2102			}
2103		}
2104		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2105			vector = 0;
2106
2107		for (i = 0; i < adapter->num_rx_queues; i++) {
2108			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2109				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2110					adapter->netdev->name, vector);
2111			else
2112				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2113					adapter->netdev->name, vector);
2114			err = request_irq(intr->msix_entries[vector].vector,
2115					  vmxnet3_msix_rx, 0,
2116					  adapter->rx_queue[i].name,
2117					  &(adapter->rx_queue[i]));
2118			if (err) {
2119				netdev_err(adapter->netdev,
2120					   "Failed to request irq for MSIX, "
2121					   "%s, error %d\n",
2122					   adapter->rx_queue[i].name, err);
2123				return err;
2124			}
2125
2126			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2127		}
2128
2129		sprintf(intr->event_msi_vector_name, "%s-event-%d",
2130			adapter->netdev->name, vector);
2131		err = request_irq(intr->msix_entries[vector].vector,
2132				  vmxnet3_msix_event, 0,
2133				  intr->event_msi_vector_name, adapter->netdev);
2134		intr->event_intr_idx = vector;
2135
2136	} else if (intr->type == VMXNET3_IT_MSI) {
2137		adapter->num_rx_queues = 1;
2138		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2139				  adapter->netdev->name, adapter->netdev);
2140	} else {
2141#endif
2142		adapter->num_rx_queues = 1;
2143		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2144				  IRQF_SHARED, adapter->netdev->name,
2145				  adapter->netdev);
2146#ifdef CONFIG_PCI_MSI
2147	}
2148#endif
2149	intr->num_intrs = vector + 1;
2150	if (err) {
2151		netdev_err(adapter->netdev,
2152			   "Failed to request irq (intr type:%d), error %d\n",
2153			   intr->type, err);
2154	} else {
2155		/* Number of rx queues will not change after this */
2156		for (i = 0; i < adapter->num_rx_queues; i++) {
2157			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2158			rq->qid = i;
2159			rq->qid2 = i + adapter->num_rx_queues;
2160			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2161		}
2162
2163		/* init our intr settings */
2164		for (i = 0; i < intr->num_intrs; i++)
2165			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2166		if (adapter->intr.type != VMXNET3_IT_MSIX) {
2167			adapter->intr.event_intr_idx = 0;
2168			for (i = 0; i < adapter->num_tx_queues; i++)
2169				adapter->tx_queue[i].comp_ring.intr_idx = 0;
2170			adapter->rx_queue[0].comp_ring.intr_idx = 0;
2171		}
2172
2173		netdev_info(adapter->netdev,
2174			    "intr type %u, mode %u, %u vectors allocated\n",
2175			    intr->type, intr->mask_mode, intr->num_intrs);
2176	}
2177
2178	return err;
2179}
2180
2181
2182static void
2183vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2184{
2185	struct vmxnet3_intr *intr = &adapter->intr;
2186	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2187
2188	switch (intr->type) {
2189#ifdef CONFIG_PCI_MSI
2190	case VMXNET3_IT_MSIX:
2191	{
2192		int i, vector = 0;
2193
2194		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2195			for (i = 0; i < adapter->num_tx_queues; i++) {
2196				free_irq(intr->msix_entries[vector++].vector,
2197					 &(adapter->tx_queue[i]));
2198				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2199					break;
2200			}
2201		}
2202
2203		for (i = 0; i < adapter->num_rx_queues; i++) {
2204			free_irq(intr->msix_entries[vector++].vector,
2205				 &(adapter->rx_queue[i]));
2206		}
2207
2208		free_irq(intr->msix_entries[vector].vector,
2209			 adapter->netdev);
2210		BUG_ON(vector >= intr->num_intrs);
2211		break;
2212	}
2213#endif
2214	case VMXNET3_IT_MSI:
2215		free_irq(adapter->pdev->irq, adapter->netdev);
2216		break;
2217	case VMXNET3_IT_INTX:
2218		free_irq(adapter->pdev->irq, adapter->netdev);
2219		break;
2220	default:
2221		BUG();
2222	}
2223}
2224
2225
2226static void
2227vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2228{
2229	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2230	u16 vid;
2231
2232	/* allow untagged pkts */
2233	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2234
2235	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2236		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2237}
2238
2239
2240static int
2241vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2242{
2243	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2244
2245	if (!(netdev->flags & IFF_PROMISC)) {
2246		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2247		unsigned long flags;
2248
2249		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2250		spin_lock_irqsave(&adapter->cmd_lock, flags);
2251		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2252				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2253		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2254	}
2255
2256	set_bit(vid, adapter->active_vlans);
2257
2258	return 0;
2259}
2260
2261
2262static int
2263vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2264{
2265	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2266
2267	if (!(netdev->flags & IFF_PROMISC)) {
2268		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2269		unsigned long flags;
2270
2271		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2272		spin_lock_irqsave(&adapter->cmd_lock, flags);
2273		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2274				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2275		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2276	}
2277
2278	clear_bit(vid, adapter->active_vlans);
2279
2280	return 0;
2281}
2282
2283
2284static u8 *
2285vmxnet3_copy_mc(struct net_device *netdev)
2286{
2287	u8 *buf = NULL;
2288	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2289
2290	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
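	/* Illustrative (editorial addition): with ETH_ALEN == 6 the u16 limit
	 * caps the table at 65535 / 6 = 10922 multicast addresses; beyond that
	 * the caller falls back to ALL_MULTI.
	 */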
2291	if (sz <= 0xffff) {
2292		/* We may be called with BH disabled */
2293		buf = kmalloc(sz, GFP_ATOMIC);
2294		if (buf) {
2295			struct netdev_hw_addr *ha;
2296			int i = 0;
2297
2298			netdev_for_each_mc_addr(ha, netdev)
2299				memcpy(buf + i++ * ETH_ALEN, ha->addr,
2300				       ETH_ALEN);
2301		}
2302	}
2303	return buf;
2304}
2305
2306
2307static void
2308vmxnet3_set_mc(struct net_device *netdev)
2309{
2310	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2311	unsigned long flags;
2312	struct Vmxnet3_RxFilterConf *rxConf =
2313					&adapter->shared->devRead.rxFilterConf;
2314	u8 *new_table = NULL;
2315	dma_addr_t new_table_pa = 0;
2316	bool new_table_pa_valid = false;
2317	u32 new_mode = VMXNET3_RXM_UCAST;
2318
2319	if (netdev->flags & IFF_PROMISC) {
2320		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2321		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2322
2323		new_mode |= VMXNET3_RXM_PROMISC;
2324	} else {
2325		vmxnet3_restore_vlan(adapter);
2326	}
2327
2328	if (netdev->flags & IFF_BROADCAST)
2329		new_mode |= VMXNET3_RXM_BCAST;
2330
2331	if (netdev->flags & IFF_ALLMULTI)
2332		new_mode |= VMXNET3_RXM_ALL_MULTI;
2333	else
2334		if (!netdev_mc_empty(netdev)) {
2335			new_table = vmxnet3_copy_mc(netdev);
2336			if (new_table) {
2337				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2338
2339				rxConf->mfTableLen = cpu_to_le16(sz);
2340				new_table_pa = dma_map_single(
2341							&adapter->pdev->dev,
2342							new_table,
2343							sz,
2344							PCI_DMA_TODEVICE);
2345				if (!dma_mapping_error(&adapter->pdev->dev,
2346						       new_table_pa)) {
2347					new_mode |= VMXNET3_RXM_MCAST;
2348					new_table_pa_valid = true;
2349					rxConf->mfTablePA = cpu_to_le64(
2350								new_table_pa);
2351				}
2352			}
2353			if (!new_table_pa_valid) {
2354				netdev_info(netdev,
2355					    "failed to copy mcast list, setting ALL_MULTI\n");
2356				new_mode |= VMXNET3_RXM_ALL_MULTI;
2357			}
2358		}
2359
2360	if (!(new_mode & VMXNET3_RXM_MCAST)) {
2361		rxConf->mfTableLen = 0;
2362		rxConf->mfTablePA = 0;
2363	}
2364
2365	spin_lock_irqsave(&adapter->cmd_lock, flags);
2366	if (new_mode != rxConf->rxMode) {
2367		rxConf->rxMode = cpu_to_le32(new_mode);
2368		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2369				       VMXNET3_CMD_UPDATE_RX_MODE);
2370		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2371				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2372	}
2373
2374	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2375			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
2376	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2377
2378	if (new_table_pa_valid)
2379		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2380				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2381	kfree(new_table);
2382}
2383
2384void
2385vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2386{
2387	int i;
2388
2389	for (i = 0; i < adapter->num_rx_queues; i++)
2390		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2391}
2392
2393
2394/*
2395 *   Set up driver_shared based on settings in adapter.
2396 */
2397
2398static void
2399vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2400{
2401	struct Vmxnet3_DriverShared *shared = adapter->shared;
2402	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2403	struct Vmxnet3_TxQueueConf *tqc;
2404	struct Vmxnet3_RxQueueConf *rqc;
2405	int i;
2406
2407	memset(shared, 0, sizeof(*shared));
2408
2409	/* driver settings */
2410	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2411	devRead->misc.driverInfo.version = cpu_to_le32(
2412						VMXNET3_DRIVER_VERSION_NUM);
2413	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2414				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2415	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2416	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2417				*((u32 *)&devRead->misc.driverInfo.gos));
2418	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2419	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2420
2421	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2422	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2423
2424	/* set up feature flags */
2425	if (adapter->netdev->features & NETIF_F_RXCSUM)
2426		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2427
2428	if (adapter->netdev->features & NETIF_F_LRO) {
2429		devRead->misc.uptFeatures |= UPT1_F_LRO;
2430		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2431	}
2432	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2433		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2434
2435	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2436	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2437	devRead->misc.queueDescLen = cpu_to_le32(
2438		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2439		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2440
2441	/* tx queue settings */
2442	devRead->misc.numTxQueues =  adapter->num_tx_queues;
2443	for (i = 0; i < adapter->num_tx_queues; i++) {
2444		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
2445		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2446		tqc = &adapter->tqd_start[i].conf;
2447		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
2448		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2449		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2450		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
2451		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
2452		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
2453		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2454		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
2455		tqc->ddLen          = cpu_to_le32(
2456					sizeof(struct vmxnet3_tx_buf_info) *
2457					tqc->txRingSize);
2458		tqc->intrIdx        = tq->comp_ring.intr_idx;
2459	}
2460
2461	/* rx queue settings */
2462	devRead->misc.numRxQueues = adapter->num_rx_queues;
2463	for (i = 0; i < adapter->num_rx_queues; i++) {
2464		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
2465		rqc = &adapter->rqd_start[i].conf;
2466		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2467		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2468		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2469		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
2470		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
2471		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
2472		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
2473		rqc->ddLen           = cpu_to_le32(
2474					sizeof(struct vmxnet3_rx_buf_info) *
2475					(rqc->rxRingSize[0] +
2476					 rqc->rxRingSize[1]));
2477		rqc->intrIdx         = rq->comp_ring.intr_idx;
2478		if (VMXNET3_VERSION_GE_3(adapter)) {
2479			rqc->rxDataRingBasePA =
2480				cpu_to_le64(rq->data_ring.basePA);
2481			rqc->rxDataRingDescSize =
2482				cpu_to_le16(rq->data_ring.desc_size);
2483		}
2484	}
2485
2486#ifdef VMXNET3_RSS
2487	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2488
2489	if (adapter->rss) {
2490		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2491
2492		devRead->misc.uptFeatures |= UPT1_F_RSS;
2493		devRead->misc.numRxQueues = adapter->num_rx_queues;
2494		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2495				    UPT1_RSS_HASH_TYPE_IPV4 |
2496				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2497				    UPT1_RSS_HASH_TYPE_IPV6;
2498		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2499		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2500		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2501		netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2502
2503		for (i = 0; i < rssConf->indTableSize; i++)
2504			rssConf->indTable[i] = ethtool_rxfh_indir_default(
2505				i, adapter->num_rx_queues);
2506
2507		devRead->rssConfDesc.confVer = 1;
2508		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2509		devRead->rssConfDesc.confPA =
2510			cpu_to_le64(adapter->rss_conf_pa);
2511	}
2512
2513#endif /* VMXNET3_RSS */
2514
2515	/* intr settings */
2516	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2517				     VMXNET3_IMM_AUTO;
2518	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2519	for (i = 0; i < adapter->intr.num_intrs; i++)
2520		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2521
2522	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2523	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2524
2525	/* rx filter settings */
2526	devRead->rxFilterConf.rxMode = 0;
2527	vmxnet3_restore_vlan(adapter);
2528	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2529
2530	/* the rest are already zeroed */
2531}
2532
2533static void
2534vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2535{
2536	struct Vmxnet3_DriverShared *shared = adapter->shared;
2537	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2538	unsigned long flags;
2539
2540	if (!VMXNET3_VERSION_GE_3(adapter))
2541		return;
2542
2543	spin_lock_irqsave(&adapter->cmd_lock, flags);
2544	cmdInfo->varConf.confVer = 1;
2545	cmdInfo->varConf.confLen =
2546		cpu_to_le32(sizeof(*adapter->coal_conf));
2547	cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
2548
2549	if (adapter->default_coal_mode) {
2550		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2551				       VMXNET3_CMD_GET_COALESCE);
2552	} else {
2553		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2554				       VMXNET3_CMD_SET_COALESCE);
2555	}
2556
2557	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2558}
2559
2560int
2561vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2562{
2563	int err, i;
2564	u32 ret;
2565	unsigned long flags;
2566
2567	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2568		" ring sizes %u %u %u\n", adapter->netdev->name,
2569		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2570		adapter->tx_queue[0].tx_ring.size,
2571		adapter->rx_queue[0].rx_ring[0].size,
2572		adapter->rx_queue[0].rx_ring[1].size);
2573
2574	vmxnet3_tq_init_all(adapter);
2575	err = vmxnet3_rq_init_all(adapter);
2576	if (err) {
2577		netdev_err(adapter->netdev,
2578			   "Failed to init rx queue error %d\n", err);
2579		goto rq_err;
2580	}
2581
2582	err = vmxnet3_request_irqs(adapter);
2583	if (err) {
2584		netdev_err(adapter->netdev,
2585			   "Failed to setup irq for error %d\n", err);
2586		goto irq_err;
2587	}
2588
2589	vmxnet3_setup_driver_shared(adapter);
2590
2591	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2592			       adapter->shared_pa));
2593	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2594			       adapter->shared_pa));
2595	spin_lock_irqsave(&adapter->cmd_lock, flags);
2596	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2597			       VMXNET3_CMD_ACTIVATE_DEV);
2598	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2599	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2600
2601	if (ret != 0) {
2602		netdev_err(adapter->netdev,
2603			   "Failed to activate dev: error %u\n", ret);
2604		err = -EINVAL;
2605		goto activate_err;
2606	}
2607
2608	vmxnet3_init_coalesce(adapter);
2609
2610	for (i = 0; i < adapter->num_rx_queues; i++) {
2611		VMXNET3_WRITE_BAR0_REG(adapter,
2612				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2613				adapter->rx_queue[i].rx_ring[0].next2fill);
2614		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2615				(i * VMXNET3_REG_ALIGN)),
2616				adapter->rx_queue[i].rx_ring[1].next2fill);
2617	}
2618
2619	/* Apply the rx filter settings last. */
2620	vmxnet3_set_mc(adapter->netdev);
2621
2622	/*
2623	 * Check link state when first activating device. It will start the
2624	 * tx queue if the link is up.
2625	 */
2626	vmxnet3_check_link(adapter, true);
2627	for (i = 0; i < adapter->num_rx_queues; i++)
2628		napi_enable(&adapter->rx_queue[i].napi);
2629	vmxnet3_enable_all_intrs(adapter);
2630	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2631	return 0;
2632
2633activate_err:
2634	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2635	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2636	vmxnet3_free_irqs(adapter);
2637irq_err:
2638rq_err:
2639	/* free up buffers we allocated */
2640	vmxnet3_rq_cleanup_all(adapter);
2641	return err;
2642}
2643
2644
2645void
2646vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2647{
2648	unsigned long flags;
2649	spin_lock_irqsave(&adapter->cmd_lock, flags);
2650	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2651	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2652}
2653
2654
2655int
2656vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2657{
2658	int i;
2659	unsigned long flags;
2660	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2661		return 0;
2662
2663
2664	spin_lock_irqsave(&adapter->cmd_lock, flags);
2665	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2666			       VMXNET3_CMD_QUIESCE_DEV);
2667	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2668	vmxnet3_disable_all_intrs(adapter);
2669
2670	for (i = 0; i < adapter->num_rx_queues; i++)
2671		napi_disable(&adapter->rx_queue[i].napi);
2672	netif_tx_disable(adapter->netdev);
2673	adapter->link_speed = 0;
2674	netif_carrier_off(adapter->netdev);
2675
2676	vmxnet3_tq_cleanup_all(adapter);
2677	vmxnet3_rq_cleanup_all(adapter);
2678	vmxnet3_free_irqs(adapter);
2679	return 0;
2680}
2681
2682
2683static void
2685vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
2685{
2686	u32 tmp;
2687
2688	tmp = *(u32 *)mac;
2689	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2690
2691	tmp = (mac[5] << 8) | mac[4];
2692	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2693}
2694
2695
2696static int
2697vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2698{
2699	struct sockaddr *addr = p;
2700	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2701
2702	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2703	vmxnet3_write_mac_addr(adapter, addr->sa_data);
2704
2705	return 0;
2706}
2707
2708
2709/* ==================== initialization and cleanup routines ============ */
2710
2711static int
2712vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2713{
2714	int err;
2715	unsigned long mmio_start, mmio_len;
2716	struct pci_dev *pdev = adapter->pdev;
2717
2718	err = pci_enable_device(pdev);
2719	if (err) {
2720		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2721		return err;
2722	}
2723
2724	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2725					   vmxnet3_driver_name);
2726	if (err) {
2727		dev_err(&pdev->dev,
2728			"Failed to request region for adapter: error %d\n", err);
2729		goto err_enable_device;
2730	}
2731
2732	pci_set_master(pdev);
2733
2734	mmio_start = pci_resource_start(pdev, 0);
2735	mmio_len = pci_resource_len(pdev, 0);
2736	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2737	if (!adapter->hw_addr0) {
2738		dev_err(&pdev->dev, "Failed to map bar0\n");
2739		err = -EIO;
2740		goto err_ioremap;
2741	}
2742
2743	mmio_start = pci_resource_start(pdev, 1);
2744	mmio_len = pci_resource_len(pdev, 1);
2745	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2746	if (!adapter->hw_addr1) {
2747		dev_err(&pdev->dev, "Failed to map bar1\n");
2748		err = -EIO;
2749		goto err_bar1;
2750	}
2751	return 0;
2752
2753err_bar1:
2754	iounmap(adapter->hw_addr0);
2755err_ioremap:
2756	pci_release_selected_regions(pdev, (1 << 2) - 1);
2757err_enable_device:
2758	pci_disable_device(pdev);
2759	return err;
2760}
2761
2762
2763static void
2764vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2765{
2766	BUG_ON(!adapter->pdev);
2767
2768	iounmap(adapter->hw_addr0);
2769	iounmap(adapter->hw_addr1);
2770	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2771	pci_disable_device(adapter->pdev);
2772}
2773
2774
2775static void
2776vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2777{
2778	size_t sz, i, ring0_size, ring1_size, comp_size;
2779	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2780				    VMXNET3_MAX_ETH_HDR_SIZE) {
2781		adapter->skb_buf_size = adapter->netdev->mtu +
2782					VMXNET3_MAX_ETH_HDR_SIZE;
2783		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2784			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2785
2786		adapter->rx_buf_per_pkt = 1;
2787	} else {
2788		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2789		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2790					    VMXNET3_MAX_ETH_HDR_SIZE;
2791		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2792	}
2793
2794	/*
2795	 * for simplicity, force the ring0 size to be a multiple of
2796	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2797	 */
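	/* Worked example (editorial addition; constant values assumed from
	 * vmxnet3_int.h for this driver generation): with a 9000-byte MTU,
	 * PAGE_SIZE 4096 and VMXNET3_MAX_SKB_BUF_SIZE 3072, rx_buf_per_pkt
	 * becomes 3, so sz = 3 * VMXNET3_RING_SIZE_ALIGN (32) = 96 and a
	 * requested ring0 size of 1024 is rounded up to 1056.
	 */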
2798	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2799	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2800	ring0_size = (ring0_size + sz - 1) / sz * sz;
2801	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2802			   sz * sz);
2803	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2804	ring1_size = (ring1_size + sz - 1) / sz * sz;
2805	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2806			   sz * sz);
2807	comp_size = ring0_size + ring1_size;
2808
2809	for (i = 0; i < adapter->num_rx_queues; i++) {
2810		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
2811
2812		rq->rx_ring[0].size = ring0_size;
2813		rq->rx_ring[1].size = ring1_size;
2814		rq->comp_ring.size = comp_size;
2815	}
2816}
2817
2818
2819int
2820vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2821		      u32 rx_ring_size, u32 rx_ring2_size,
2822		      u16 txdata_desc_size, u16 rxdata_desc_size)
2823{
2824	int err = 0, i;
2825
2826	for (i = 0; i < adapter->num_tx_queues; i++) {
2827		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
2828		tq->tx_ring.size   = tx_ring_size;
2829		tq->data_ring.size = tx_ring_size;
2830		tq->comp_ring.size = tx_ring_size;
2831		tq->txdata_desc_size = txdata_desc_size;
2832		tq->shared = &adapter->tqd_start[i].ctrl;
2833		tq->stopped = true;
2834		tq->adapter = adapter;
2835		tq->qid = i;
2836		err = vmxnet3_tq_create(tq, adapter);
2837		/*
2838		 * Too late to change num_tx_queues. We cannot make do with
2839		 * fewer queues than we asked for.
2840		 */
2841		if (err)
2842			goto queue_err;
2843	}
2844
2845	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2846	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2847	vmxnet3_adjust_rx_ring_size(adapter);
2848
2849	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2850	for (i = 0; i < adapter->num_rx_queues; i++) {
2851		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2852		/* qid and qid2 for rx queues will be assigned later, once the
2853		 * number of rx queues is finalized after interrupt allocation */
2854		rq->shared = &adapter->rqd_start[i].ctrl;
2855		rq->adapter = adapter;
2856		rq->data_ring.desc_size = rxdata_desc_size;
2857		err = vmxnet3_rq_create(rq, adapter);
2858		if (err) {
2859			if (i == 0) {
2860				netdev_err(adapter->netdev,
2861					   "Could not allocate any rx queues. "
2862					   "Aborting.\n");
2863				goto queue_err;
2864			} else {
2865				netdev_info(adapter->netdev,
2866					    "Number of rx queues changed "
2867					    "to : %d.\n", i);
2868				adapter->num_rx_queues = i;
2869				err = 0;
2870				break;
2871			}
2872		}
2873	}
2874
2875	if (!adapter->rxdataring_enabled)
2876		vmxnet3_rq_destroy_all_rxdataring(adapter);
2877
2878	return err;
2879queue_err:
2880	vmxnet3_tq_destroy_all(adapter);
2881	return err;
2882}
2883
2884static int
2885vmxnet3_open(struct net_device *netdev)
2886{
2887	struct vmxnet3_adapter *adapter;
2888	int err, i;
2889
2890	adapter = netdev_priv(netdev);
2891
2892	for (i = 0; i < adapter->num_tx_queues; i++)
2893		spin_lock_init(&adapter->tx_queue[i].tx_lock);
2894
2895	if (VMXNET3_VERSION_GE_3(adapter)) {
2896		unsigned long flags;
2897		u16 txdata_desc_size;
2898
2899		spin_lock_irqsave(&adapter->cmd_lock, flags);
2900		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2901				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
2902		txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
2903							 VMXNET3_REG_CMD);
2904		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2905
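		/* Use the device-reported size only if it is within range and
		 * properly aligned; otherwise fall back to the default
		 * sizeof(struct Vmxnet3_TxDataDesc).
		 */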
2906		if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
2907		    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
2908		    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
2909			adapter->txdata_desc_size =
2910				sizeof(struct Vmxnet3_TxDataDesc);
2911		} else {
2912			adapter->txdata_desc_size = txdata_desc_size;
2913		}
2914	} else {
2915		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
2916	}
2917
2918	err = vmxnet3_create_queues(adapter,
2919				    adapter->tx_ring_size,
2920				    adapter->rx_ring_size,
2921				    adapter->rx_ring2_size,
2922				    adapter->txdata_desc_size,
2923				    adapter->rxdata_desc_size);
2924	if (err)
2925		goto queue_err;
2926
2927	err = vmxnet3_activate_dev(adapter);
2928	if (err)
2929		goto activate_err;
2930
2931	return 0;
2932
2933activate_err:
2934	vmxnet3_rq_destroy_all(adapter);
2935	vmxnet3_tq_destroy_all(adapter);
2936queue_err:
2937	return err;
2938}
2939
2940
2941static int
2942vmxnet3_close(struct net_device *netdev)
2943{
2944	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2945
2946	/*
2947	 * Reset_work may be in the middle of resetting the device; wait for
2948	 * it to complete.
2949	 */
2950	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2951		usleep_range(1000, 2000);
2952
2953	vmxnet3_quiesce_dev(adapter);
2954
2955	vmxnet3_rq_destroy_all(adapter);
2956	vmxnet3_tq_destroy_all(adapter);
2957
2958	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2959
2960
2961	return 0;
2962}
2963
2964
2965void
2966vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2967{
2968	int i;
2969
2970	/*
2971	 * VMXNET3_STATE_BIT_RESETTING must already be clear here, otherwise
2972	 * vmxnet3_close() would deadlock.
2973	 */
2974	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2975
2976	/* we need to enable NAPI, otherwise dev_close will deadlock */
2977	for (i = 0; i < adapter->num_rx_queues; i++)
2978		napi_enable(&adapter->rx_queue[i].napi);
2979	/*
2980	 * Need to clear the quiesce bit to ensure that vmxnet3_close
2981	 * can quiesce the device properly
2982	 */
2983	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2984	dev_close(adapter->netdev);
2985}
2986
2987
2988static int
2989vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2990{
2991	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2992	int err = 0;
2993
2994	netdev->mtu = new_mtu;
2995
2996	/*
2997	 * Reset_work may be in the middle of resetting the device; wait for
2998	 * it to complete.
2999	 */
3000	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3001		usleep_range(1000, 2000);
3002
3003	if (netif_running(netdev)) {
3004		vmxnet3_quiesce_dev(adapter);
3005		vmxnet3_reset_dev(adapter);
3006
3007		/* we need to re-create the rx queue based on the new mtu */
3008		vmxnet3_rq_destroy_all(adapter);
3009		vmxnet3_adjust_rx_ring_size(adapter);
3010		err = vmxnet3_rq_create_all(adapter);
3011		if (err) {
3012			netdev_err(netdev,
3013				   "failed to re-create rx queues, "
3014				   "error %d. Closing it.\n", err);
3015			goto out;
3016		}
3017
3018		err = vmxnet3_activate_dev(adapter);
3019		if (err) {
3020			netdev_err(netdev,
3021				   "failed to re-activate, error %d. "
3022				   "Closing it\n", err);
3023			goto out;
3024		}
3025	}
3026
3027out:
3028	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3029	if (err)
3030		vmxnet3_force_close(adapter);
3031
3032	return err;
3033}
3034
3035
3036static void
3037vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3038{
3039	struct net_device *netdev = adapter->netdev;
3040
3041	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3042		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3043		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3044		NETIF_F_LRO;
3045	if (dma64)
3046		netdev->hw_features |= NETIF_F_HIGHDMA;
3047	netdev->vlan_features = netdev->hw_features &
3048				~(NETIF_F_HW_VLAN_CTAG_TX |
3049				  NETIF_F_HW_VLAN_CTAG_RX);
3050	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3051}
3052
3053
3054static void
3055vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3056{
3057	u32 tmp;
3058
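	/* MACL holds MAC bytes 0-3; the low 16 bits of MACH hold bytes 4-5. */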
3059	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3060	*(u32 *)mac = tmp;
3061
3062	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3063	mac[4] = tmp & 0xff;
3064	mac[5] = (tmp >> 8) & 0xff;
3065}
3066
3067#ifdef CONFIG_PCI_MSI
3068
3069/*
3070 * Enable MSI-X vectors.
3071 * Returns:
3072 *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
3073 *	 vectors could be enabled,
3074 *	the number of vectors enabled otherwise (greater than
3075 *	 VMXNET3_LINUX_MIN_MSIX_VECT), or a negative error code on failure.
3076 */
3077
3078static int
3079vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3080{
3081	int ret = pci_enable_msix_range(adapter->pdev,
3082					adapter->intr.msix_entries, nvec, nvec);
3083
3084	if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3085		dev_err(&adapter->netdev->dev,
3086			"Failed to enable %d MSI-X, trying %d\n",
3087			nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3088
3089		ret = pci_enable_msix_range(adapter->pdev,
3090					    adapter->intr.msix_entries,
3091					    VMXNET3_LINUX_MIN_MSIX_VECT,
3092					    VMXNET3_LINUX_MIN_MSIX_VECT);
3093	}
3094
3095	if (ret < 0) {
3096		dev_err(&adapter->netdev->dev,
3097			"Failed to enable MSI-X, error: %d\n", ret);
3098	}
3099
3100	return ret;
3101}
3102
3103
3104#endif /* CONFIG_PCI_MSI */
3105
3106static void
3107vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3108{
3109	u32 cfg;
3110	unsigned long flags;
3111
3112	/* intr settings */
3113	spin_lock_irqsave(&adapter->cmd_lock, flags);
3114	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3115			       VMXNET3_CMD_GET_CONF_INTR);
3116	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3117	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
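	/* Bits 1:0 of the reply select the interrupt type, bits 3:2 the
	 * interrupt mask mode.
	 */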
3118	adapter->intr.type = cfg & 0x3;
3119	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3120
3121	if (adapter->intr.type == VMXNET3_IT_AUTO) {
3122		adapter->intr.type = VMXNET3_IT_MSIX;
3123	}
3124
3125#ifdef CONFIG_PCI_MSI
3126	if (adapter->intr.type == VMXNET3_IT_MSIX) {
3127		int i, nvec;
3128
3129		nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3130			1 : adapter->num_tx_queues;
3131		nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3132			0 : adapter->num_rx_queues;
3133		nvec += 1;	/* for link event */
3134		nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3135		       nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
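		/* Example with hypothetical counts: 4 tx and 4 rx queues with
		 * no interrupt sharing request 4 + 4 + 1 (link event) = 9
		 * vectors, but never fewer than VMXNET3_LINUX_MIN_MSIX_VECT.
		 */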
3136
3137		for (i = 0; i < nvec; i++)
3138			adapter->intr.msix_entries[i].entry = i;
3139
3140		nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3141		if (nvec < 0)
3142			goto msix_err;
3143
3144		/* If we cannot allocate one MSIx vector per queue
3145		 * then limit the number of rx queues to 1
3146		 */
3147		if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
3148			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3149			    || adapter->num_rx_queues != 1) {
3150				adapter->share_intr = VMXNET3_INTR_TXSHARE;
3151				netdev_err(adapter->netdev,
3152					   "Number of rx queues : 1\n");
3153				adapter->num_rx_queues = 1;
3154			}
3155		}
3156
3157		adapter->intr.num_intrs = nvec;
3158		return;
3159
3160msix_err:
3161		/* If we cannot allocate MSIx vectors use only one rx queue */
3162		dev_info(&adapter->pdev->dev,
3163			 "Failed to enable MSI-X, error %d. "
3164			 "Limiting #rx queues to 1, try MSI.\n", nvec);
3165
3166		adapter->intr.type = VMXNET3_IT_MSI;
3167	}
3168
3169	if (adapter->intr.type == VMXNET3_IT_MSI) {
3170		if (!pci_enable_msi(adapter->pdev)) {
3171			adapter->num_rx_queues = 1;
3172			adapter->intr.num_intrs = 1;
3173			return;
3174		}
3175	}
3176#endif /* CONFIG_PCI_MSI */
3177
3178	adapter->num_rx_queues = 1;
3179	dev_info(&adapter->netdev->dev,
3180		 "Using INTx interrupt, #Rx queues: 1.\n");
3181	adapter->intr.type = VMXNET3_IT_INTX;
3182
3183	/* INT-X related setting */
3184	adapter->intr.num_intrs = 1;
3185}
3186
3187
3188static void
3189vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3190{
3191	if (adapter->intr.type == VMXNET3_IT_MSIX)
3192		pci_disable_msix(adapter->pdev);
3193	else if (adapter->intr.type == VMXNET3_IT_MSI)
3194		pci_disable_msi(adapter->pdev);
3195	else
3196		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3197}
3198
3199
3200static void
3201vmxnet3_tx_timeout(struct net_device *netdev)
3202{
3203	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3204	adapter->tx_timeout_count++;
3205
3206	netdev_err(adapter->netdev, "tx hang\n");
3207	schedule_work(&adapter->work);
3208}
3209
3210
3211static void
3212vmxnet3_reset_work(struct work_struct *data)
3213{
3214	struct vmxnet3_adapter *adapter;
3215
3216	adapter = container_of(data, struct vmxnet3_adapter, work);
3217
3218	/* if another thread is resetting the device, no need to proceed */
3219	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3220		return;
3221
3222	/* if the device is closed, we must leave it alone */
3223	rtnl_lock();
3224	if (netif_running(adapter->netdev)) {
3225		netdev_notice(adapter->netdev, "resetting\n");
3226		vmxnet3_quiesce_dev(adapter);
3227		vmxnet3_reset_dev(adapter);
3228		vmxnet3_activate_dev(adapter);
3229	} else {
3230		netdev_info(adapter->netdev, "already closed\n");
3231	}
3232	rtnl_unlock();
3233
3234	netif_wake_queue(adapter->netdev);
3235	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3236}
3237
3238
3239static int
3240vmxnet3_probe_device(struct pci_dev *pdev,
3241		     const struct pci_device_id *id)
3242{
3243	static const struct net_device_ops vmxnet3_netdev_ops = {
3244		.ndo_open = vmxnet3_open,
3245		.ndo_stop = vmxnet3_close,
3246		.ndo_start_xmit = vmxnet3_xmit_frame,
3247		.ndo_set_mac_address = vmxnet3_set_mac_addr,
3248		.ndo_change_mtu = vmxnet3_change_mtu,
3249		.ndo_fix_features = vmxnet3_fix_features,
3250		.ndo_set_features = vmxnet3_set_features,
3251		.ndo_get_stats64 = vmxnet3_get_stats64,
3252		.ndo_tx_timeout = vmxnet3_tx_timeout,
3253		.ndo_set_rx_mode = vmxnet3_set_mc,
3254		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3255		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3256#ifdef CONFIG_NET_POLL_CONTROLLER
3257		.ndo_poll_controller = vmxnet3_netpoll,
3258#endif
3259	};
3260	int err;
3261	bool dma64;
3262	u32 ver;
3263	struct net_device *netdev;
3264	struct vmxnet3_adapter *adapter;
3265	u8 mac[ETH_ALEN];
3266	int size;
3267	int num_tx_queues;
3268	int num_rx_queues;
3269
3270	if (!pci_msi_enabled())
3271		enable_mq = 0;
3272
3273#ifdef VMXNET3_RSS
3274	if (enable_mq)
3275		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3276				    (int)num_online_cpus());
3277	else
3278#endif
3279		num_rx_queues = 1;
3280	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
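	/* e.g. 6 online CPUs would give 4 rx queues (rounded down to a power
	 * of two), assuming VMXNET3_DEVICE_MAX_RX_QUEUES allows that many.
	 */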
3281
3282	if (enable_mq)
3283		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3284				    (int)num_online_cpus());
3285	else
3286		num_tx_queues = 1;
3287
3288	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3289	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3290				   max(num_tx_queues, num_rx_queues));
3291	dev_info(&pdev->dev,
3292		 "# of Tx queues : %d, # of Rx queues : %d\n",
3293		 num_tx_queues, num_rx_queues);
3294
3295	if (!netdev)
3296		return -ENOMEM;
3297
3298	pci_set_drvdata(pdev, netdev);
3299	adapter = netdev_priv(netdev);
3300	adapter->netdev = netdev;
3301	adapter->pdev = pdev;
3302
3303	adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3304	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3305	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3306
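	/* Prefer a 64-bit DMA mask; fall back to a 32-bit mask if the
	 * platform cannot provide one.
	 */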
3307	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
3308		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
3309			dev_err(&pdev->dev,
3310				"pci_set_consistent_dma_mask failed\n");
3311			err = -EIO;
3312			goto err_set_mask;
3313		}
3314		dma64 = true;
3315	} else {
3316		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
3317			dev_err(&pdev->dev,
3318				"pci_set_dma_mask failed\n");
3319			err = -EIO;
3320			goto err_set_mask;
3321		}
3322		dma64 = false;
3323	}
3324
3325	spin_lock_init(&adapter->cmd_lock);
3326	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3327					     sizeof(struct vmxnet3_adapter),
3328					     PCI_DMA_TODEVICE);
3329	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3330		dev_err(&pdev->dev, "Failed to map dma\n");
3331		err = -EFAULT;
3332		goto err_set_mask;
3333	}
3334	adapter->shared = dma_alloc_coherent(
3335				&adapter->pdev->dev,
3336				sizeof(struct Vmxnet3_DriverShared),
3337				&adapter->shared_pa, GFP_KERNEL);
3338	if (!adapter->shared) {
3339		dev_err(&pdev->dev, "Failed to allocate memory\n");
3340		err = -ENOMEM;
3341		goto err_alloc_shared;
3342	}
3343
3344	adapter->num_rx_queues = num_rx_queues;
3345	adapter->num_tx_queues = num_tx_queues;
3346	adapter->rx_buf_per_pkt = 1;
3347
3348	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3349	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3350	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3351						&adapter->queue_desc_pa,
3352						GFP_KERNEL);
3353
3354	if (!adapter->tqd_start) {
3355		dev_err(&pdev->dev, "Failed to allocate memory\n");
3356		err = -ENOMEM;
3357		goto err_alloc_queue_desc;
3358	}
3359	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3360							    adapter->num_tx_queues);
3361
3362	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3363					      sizeof(struct Vmxnet3_PMConf),
3364					      &adapter->pm_conf_pa,
3365					      GFP_KERNEL);
3366	if (adapter->pm_conf == NULL) {
3367		err = -ENOMEM;
3368		goto err_alloc_pm;
3369	}
3370
3371#ifdef VMXNET3_RSS
3372
3373	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3374					       sizeof(struct UPT1_RSSConf),
3375					       &adapter->rss_conf_pa,
3376					       GFP_KERNEL);
3377	if (adapter->rss_conf == NULL) {
3378		err = -ENOMEM;
3379		goto err_alloc_rss;
3380	}
3381#endif /* VMXNET3_RSS */
3382
3383	err = vmxnet3_alloc_pci_resources(adapter);
3384	if (err < 0)
3385		goto err_alloc_pci;
3386
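	/* VRRS advertises the supported device revisions as a bitmap; pick
	 * the highest revision both sides understand and acknowledge it by
	 * writing that bit back.
	 */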
3387	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3388	if (ver & (1 << VMXNET3_REV_3)) {
3389		VMXNET3_WRITE_BAR1_REG(adapter,
3390				       VMXNET3_REG_VRRS,
3391				       1 << VMXNET3_REV_3);
3392		adapter->version = VMXNET3_REV_3 + 1;
3393	} else if (ver & (1 << VMXNET3_REV_2)) {
3394		VMXNET3_WRITE_BAR1_REG(adapter,
3395				       VMXNET3_REG_VRRS,
3396				       1 << VMXNET3_REV_2);
3397		adapter->version = VMXNET3_REV_2 + 1;
3398	} else if (ver & (1 << VMXNET3_REV_1)) {
3399		VMXNET3_WRITE_BAR1_REG(adapter,
3400				       VMXNET3_REG_VRRS,
3401				       1 << VMXNET3_REV_1);
3402		adapter->version = VMXNET3_REV_1 + 1;
3403	} else {
3404		dev_err(&pdev->dev,
3405			"Incompatible h/w version (0x%x) for adapter\n", ver);
3406		err = -EBUSY;
3407		goto err_ver;
3408	}
3409	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3410
3411	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3412	if (ver & 1) {
3413		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3414	} else {
3415		dev_err(&pdev->dev,
3416			"Incompatible upt version (0x%x) for adapter\n", ver);
3417		err = -EBUSY;
3418		goto err_ver;
3419	}
3420
3421	if (VMXNET3_VERSION_GE_3(adapter)) {
3422		adapter->coal_conf =
3423			dma_alloc_coherent(&adapter->pdev->dev,
3424					   sizeof(struct Vmxnet3_CoalesceScheme),
3426					   &adapter->coal_conf_pa,
3427					   GFP_KERNEL);
3428		if (!adapter->coal_conf) {
3429			err = -ENOMEM;
3430			goto err_ver;
3431		}
3432		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3433		adapter->default_coal_mode = true;
3434	}
3435
3436	SET_NETDEV_DEV(netdev, &pdev->dev);
3437	vmxnet3_declare_features(adapter, dma64);
3438
3439	adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3440		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3441
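	/* With buddy sharing an rx queue reuses its tx queue's MSI-X vector
	 * (see the nvec accounting in vmxnet3_alloc_intr_resources), which
	 * only works when the tx and rx queue counts match.
	 */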
3442	if (adapter->num_tx_queues == adapter->num_rx_queues)
3443		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3444	else
3445		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3446
3447	vmxnet3_alloc_intr_resources(adapter);
3448
3449#ifdef VMXNET3_RSS
3450	if (adapter->num_rx_queues > 1 &&
3451	    adapter->intr.type == VMXNET3_IT_MSIX) {
3452		adapter->rss = true;
3453		netdev->hw_features |= NETIF_F_RXHASH;
3454		netdev->features |= NETIF_F_RXHASH;
3455		dev_dbg(&pdev->dev, "RSS is enabled.\n");
3456	} else {
3457		adapter->rss = false;
3458	}
3459#endif
3460
3461	vmxnet3_read_mac_addr(adapter, mac);
3462	memcpy(netdev->dev_addr, mac, netdev->addr_len);
3463
3464	netdev->netdev_ops = &vmxnet3_netdev_ops;
3465	vmxnet3_set_ethtool_ops(netdev);
3466	netdev->watchdog_timeo = 5 * HZ;
3467
3468	/* MTU range: 60 - 9000 */
3469	netdev->min_mtu = VMXNET3_MIN_MTU;
3470	netdev->max_mtu = VMXNET3_MAX_MTU;
3471
3472	INIT_WORK(&adapter->work, vmxnet3_reset_work);
3473	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3474
3475	if (adapter->intr.type == VMXNET3_IT_MSIX) {
3476		int i;
3477		for (i = 0; i < adapter->num_rx_queues; i++) {
3478			netif_napi_add(adapter->netdev,
3479				       &adapter->rx_queue[i].napi,
3480				       vmxnet3_poll_rx_only, 64);
3481		}
3482	} else {
3483		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3484			       vmxnet3_poll, 64);
3485	}
3486
3487	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3488	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3489
3490	netif_carrier_off(netdev);
3491	err = register_netdev(netdev);
3492
3493	if (err) {
3494		dev_err(&pdev->dev, "Failed to register adapter\n");
3495		goto err_register;
3496	}
3497
3498	vmxnet3_check_link(adapter, false);
3499	return 0;
3500
3501err_register:
3502	if (VMXNET3_VERSION_GE_3(adapter)) {
3503		dma_free_coherent(&adapter->pdev->dev,
3504				  sizeof(struct Vmxnet3_CoalesceScheme),
3505				  adapter->coal_conf, adapter->coal_conf_pa);
3506	}
3507	vmxnet3_free_intr_resources(adapter);
3508err_ver:
3509	vmxnet3_free_pci_resources(adapter);
3510err_alloc_pci:
3511#ifdef VMXNET3_RSS
3512	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3513			  adapter->rss_conf, adapter->rss_conf_pa);
3514err_alloc_rss:
3515#endif
3516	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3517			  adapter->pm_conf, adapter->pm_conf_pa);
3518err_alloc_pm:
3519	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3520			  adapter->queue_desc_pa);
3521err_alloc_queue_desc:
3522	dma_free_coherent(&adapter->pdev->dev,
3523			  sizeof(struct Vmxnet3_DriverShared),
3524			  adapter->shared, adapter->shared_pa);
3525err_alloc_shared:
3526	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3527			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3528err_set_mask:
3529	free_netdev(netdev);
3530	return err;
3531}
3532
3533
3534static void
3535vmxnet3_remove_device(struct pci_dev *pdev)
3536{
3537	struct net_device *netdev = pci_get_drvdata(pdev);
3538	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3539	int size = 0;
3540	int num_rx_queues;
3541
3542#ifdef VMXNET3_RSS
3543	if (enable_mq)
3544		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3545				    (int)num_online_cpus());
3546	else
3547#endif
3548		num_rx_queues = 1;
3549	num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3550
3551	cancel_work_sync(&adapter->work);
3552
3553	unregister_netdev(netdev);
3554
3555	vmxnet3_free_intr_resources(adapter);
3556	vmxnet3_free_pci_resources(adapter);
3557	if (VMXNET3_VERSION_GE_3(adapter)) {
3558		dma_free_coherent(&adapter->pdev->dev,
3559				  sizeof(struct Vmxnet3_CoalesceScheme),
3560				  adapter->coal_conf, adapter->coal_conf_pa);
3561	}
3562#ifdef VMXNET3_RSS
3563	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3564			  adapter->rss_conf, adapter->rss_conf_pa);
3565#endif
3566	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3567			  adapter->pm_conf, adapter->pm_conf_pa);
3568
3569	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3570	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3571	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3572			  adapter->queue_desc_pa);
3573	dma_free_coherent(&adapter->pdev->dev,
3574			  sizeof(struct Vmxnet3_DriverShared),
3575			  adapter->shared, adapter->shared_pa);
3576	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3577			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3578	free_netdev(netdev);
3579}
3580
3581static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3582{
3583	struct net_device *netdev = pci_get_drvdata(pdev);
3584	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3585	unsigned long flags;
3586
3587	/* Reset_work may be in the middle of resetting the device; wait for
3588	 * it to complete.
3589	 */
3590	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3591		usleep_range(1000, 2000);
3592
3593	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3594			     &adapter->state)) {
3595		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3596		return;
3597	}
3598	spin_lock_irqsave(&adapter->cmd_lock, flags);
3599	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3600			       VMXNET3_CMD_QUIESCE_DEV);
3601	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3602	vmxnet3_disable_all_intrs(adapter);
3603
3604	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3605}
3606
3607
3608#ifdef CONFIG_PM
3609
3610static int
3611vmxnet3_suspend(struct device *device)
3612{
3613	struct pci_dev *pdev = to_pci_dev(device);
3614	struct net_device *netdev = pci_get_drvdata(pdev);
3615	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3616	struct Vmxnet3_PMConf *pmConf;
3617	struct ethhdr *ehdr;
3618	struct arphdr *ahdr;
3619	u8 *arpreq;
3620	struct in_device *in_dev;
3621	struct in_ifaddr *ifa;
3622	unsigned long flags;
3623	int i = 0;
3624
3625	if (!netif_running(netdev))
3626		return 0;
3627
3628	for (i = 0; i < adapter->num_rx_queues; i++)
3629		napi_disable(&adapter->rx_queue[i].napi);
3630
3631	vmxnet3_disable_all_intrs(adapter);
3632	vmxnet3_free_irqs(adapter);
3633	vmxnet3_free_intr_resources(adapter);
3634
3635	netif_device_detach(netdev);
3636	netif_tx_stop_all_queues(netdev);
3637
3638	/* Create wake-up filters. */
3639	pmConf = adapter->pm_conf;
3640	memset(pmConf, 0, sizeof(*pmConf));
3641
3642	if (adapter->wol & WAKE_UCAST) {
3643		pmConf->filters[i].patternSize = ETH_ALEN;
3644		pmConf->filters[i].maskSize = 1;
3645		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3646		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3647
3648		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3649		i++;
3650	}
3651
3652	if (adapter->wol & WAKE_ARP) {
3653		rcu_read_lock();
3654
3655		in_dev = __in_dev_get_rcu(netdev);
3656		if (!in_dev) {
3657			rcu_read_unlock();
3658			goto skip_arp;
3659		}
3660
3661		ifa = rcu_dereference(in_dev->ifa_list);
3662		if (!ifa) {
3663			rcu_read_unlock();
3664			goto skip_arp;
3665		}
3666
3667		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3668			sizeof(struct arphdr) +		/* ARP header */
3669			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
3670			2 * sizeof(u32);	/*2 IPv4 addresses */
3671		pmConf->filters[i].maskSize =
3672			(pmConf->filters[i].patternSize - 1) / 8 + 1;
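		/* Each mask bit enables one pattern byte, so a mask byte
		 * covers eight pattern bytes and maskSize is
		 * DIV_ROUND_UP(patternSize, 8).
		 */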
3673
3674		/* ETH_P_ARP in Ethernet header. */
3675		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3676		ehdr->h_proto = htons(ETH_P_ARP);
3677
3678		/* ARPOP_REQUEST in ARP header. */
3679		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3680		ahdr->ar_op = htons(ARPOP_REQUEST);
3681		arpreq = (u8 *)(ahdr + 1);
3682
3683		/* The Unicast IPv4 address in 'tip' field. */
3684		arpreq += 2 * ETH_ALEN + sizeof(u32);
3685		*(__be32 *)arpreq = ifa->ifa_address;
3686
3687		rcu_read_unlock();
3688
3689		/* The mask for the relevant bits. */
3690		pmConf->filters[i].mask[0] = 0x00;
3691		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3692		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3693		pmConf->filters[i].mask[3] = 0x00;
3694		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3695		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3696
3697		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3698		i++;
3699	}
3700
3701skip_arp:
3702	if (adapter->wol & WAKE_MAGIC)
3703		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3704
3705	pmConf->numFilters = i;
3706
3707	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3708	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3709								  *pmConf));
3710	adapter->shared->devRead.pmConfDesc.confPA =
3711		cpu_to_le64(adapter->pm_conf_pa);
3712
3713	spin_lock_irqsave(&adapter->cmd_lock, flags);
3714	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3715			       VMXNET3_CMD_UPDATE_PMCFG);
3716	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3717
3718	pci_save_state(pdev);
3719	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3720			adapter->wol);
3721	pci_disable_device(pdev);
3722	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3723
3724	return 0;
3725}
3726
3727
3728static int
3729vmxnet3_resume(struct device *device)
3730{
3731	int err;
3732	unsigned long flags;
3733	struct pci_dev *pdev = to_pci_dev(device);
3734	struct net_device *netdev = pci_get_drvdata(pdev);
3735	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3736
3737	if (!netif_running(netdev))
3738		return 0;
3739
3740	pci_set_power_state(pdev, PCI_D0);
3741	pci_restore_state(pdev);
3742	err = pci_enable_device_mem(pdev);
3743	if (err != 0)
3744		return err;
3745
3746	pci_enable_wake(pdev, PCI_D0, 0);
3747
3748	vmxnet3_alloc_intr_resources(adapter);
3749
3750	/* During hibernate and suspend, device has to be reinitialized as the
3751	 * device state need not be preserved.
3752	 */
3753
3754	/* Need not check adapter state as other reset tasks cannot run during
3755	 * device resume.
3756	 */
3757	spin_lock_irqsave(&adapter->cmd_lock, flags);
3758	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3759			       VMXNET3_CMD_QUIESCE_DEV);
3760	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3761	vmxnet3_tq_cleanup_all(adapter);
3762	vmxnet3_rq_cleanup_all(adapter);
3763
3764	vmxnet3_reset_dev(adapter);
3765	err = vmxnet3_activate_dev(adapter);
3766	if (err != 0) {
3767		netdev_err(netdev,
3768			   "failed to re-activate on resume, error: %d", err);
3769		vmxnet3_force_close(adapter);
3770		return err;
3771	}
3772	netif_device_attach(netdev);
3773
3774	return 0;
3775}
3776
3777static const struct dev_pm_ops vmxnet3_pm_ops = {
3778	.suspend = vmxnet3_suspend,
3779	.resume = vmxnet3_resume,
3780	.freeze = vmxnet3_suspend,
3781	.restore = vmxnet3_resume,
3782};
3783#endif
3784
3785static struct pci_driver vmxnet3_driver = {
3786	.name		= vmxnet3_driver_name,
3787	.id_table	= vmxnet3_pciid_table,
3788	.probe		= vmxnet3_probe_device,
3789	.remove		= vmxnet3_remove_device,
3790	.shutdown	= vmxnet3_shutdown_device,
3791#ifdef CONFIG_PM
3792	.driver.pm	= &vmxnet3_pm_ops,
3793#endif
3794};
3795
3796
3797static int __init
3798vmxnet3_init_module(void)
3799{
3800	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3801		VMXNET3_DRIVER_VERSION_REPORT);
3802	return pci_register_driver(&vmxnet3_driver);
3803}
3804
3805module_init(vmxnet3_init_module);
3806
3807
3808static void
3809vmxnet3_exit_module(void)
3810{
3811	pci_unregister_driver(&vmxnet3_driver);
3812}
3813
3814module_exit(vmxnet3_exit_module);
3815
3816MODULE_AUTHOR("VMware, Inc.");
3817MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3818MODULE_LICENSE("GPL v2");
3819MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);