/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
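/* ENIC_DESC_MAX_SPLITS is the worst-case descriptor count for one TSO send:
 * a MAX_TSO (64 KiB) payload cut into WQ_ENET_MAX_DESC_LEN sized pieces,
 * plus one for the remainder.  As an example, if WQ_ENET_LEN_BITS were 14
 * (16 KiB per descriptor), this would be 65536 / 16384 + 1 = 5.
 */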

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/*  Interrupt moderation table, which will be used to decide the
 *  coalescing timer values
 *  {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};
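
/* How to read mod_table: each row is {rx rate ceiling in Mbps, percent}.
 * enic_calc_int_moderation() walks the table until the measured rx rate
 * falls below a row's ceiling, then places the coalescing timer at that
 * row's percentage point within [range_start, range_end].  The final
 * 0xFFFFFFFF row catches any rate above 7000 Mbps.
 */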

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
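
/* Each mod_range row is {small_pkt_range_start, large_pkt_range_start}.
 * enic_set_rx_coal_setting() picks the row matching the link speed, and
 * enic_calc_int_moderation() then chooses between the two range starts
 * depending on whether small or large packets dominate the traffic.
 */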

static void enic_init_affinity_hint(struct enic *enic)
{
	int numa_node = dev_to_node(&enic->pdev->dev);
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
		    (cpumask_available(enic->msix[i].affinity_mask) &&
		     !cpumask_empty(enic->msix[i].affinity_mask)))
			continue;
		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
				       GFP_KERNEL))
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					enic->msix[i].affinity_mask);
	}
}

static void enic_free_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
			continue;
		free_cpumask_var(enic->msix[i].affinity_mask);
	}
}

static void enic_set_affinity_hint(struct enic *enic)
{
	int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i)		||
		    enic_is_notify_intr(enic, i)	||
		    !cpumask_available(enic->msix[i].affinity_mask) ||
		    cpumask_empty(enic->msix[i].affinity_mask))
			continue;
		err = irq_update_affinity_hint(enic->msix_entry[i].vector,
					       enic->msix[i].affinity_mask);
		if (err)
			netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
				    err);
	}

	for (i = 0; i < enic->wq_count; i++) {
		int wq_intr = enic_msix_wq_intr(enic, i);

		if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
		    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
			netif_set_xps_queue(enic->netdev,
					    enic->msix[wq_intr].affinity_mask,
					    i);
	}
}

static void enic_unset_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++)
		irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}

static int enic_udp_tunnel_set_port(struct net_device *netdev,
				    unsigned int table, unsigned int entry,
				    struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	err = vnic_dev_overlay_offload_cfg(enic->vdev,
					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
					   ntohs(ti->port));
	if (err)
		goto error;

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    enic->vxlan.patch_level);
	if (err)
		goto error;

	enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
error:
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_udp_tunnel_unset_port(struct net_device *netdev,
				      unsigned int table, unsigned int entry,
				      struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    OVERLAY_OFFLOAD_DISABLE);
	if (err)
		goto unlock;

	enic->vxlan.vxlan_udp_port_number = 0;

unlock:
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static const struct udp_tunnel_nic_info enic_udp_tunnels = {
	.set_port	= enic_udp_tunnel_set_port,
	.unset_port	= enic_udp_tunnel_unset_port,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
}, enic_udp_tunnels_v4 = {
	.set_port	= enic_udp_tunnel_set_port,
	.unset_port	= enic_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};
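
/* Two flavors of VXLAN offload info: enic_udp_tunnels allows any outer IP
 * version, while enic_udp_tunnels_v4 restricts offload to IPv4 outer
 * headers.  Which one gets registered is presumably decided at probe time
 * from the adapter's reported VXLAN capabilities (not shown in this
 * excerpt).
 */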

static netdev_features_t enic_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
	struct enic *enic = netdev_priv(dev);
	struct udphdr *udph;
	u16 port = 0;
	u8 proto;

	if (!skb->encapsulation)
		return features;

	features = vxlan_features_check(skb, features);

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IPV6):
		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
			goto out;
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	default:
		goto out;
	}

	switch (eth->h_proto) {
	case ntohs(ETH_P_IPV6):
		if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
			goto out;
		fallthrough;
	case ntohs(ETH_P_IP):
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
	 * for other UDP port tunnels
	 */
	if (port != enic->vxlan.vxlan_udp_port_number)
		goto out;

	return features;

out:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
			       DMA_TO_DEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
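
/* Note that the wake threshold above mirrors the stop condition in
 * enic_hard_start_xmit(): the queue is restarted only once there is room
 * for a worst-case send (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors),
 * so a just-woken queue cannot immediately stall again.
 */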

static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}

	return err;
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
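/* pba is the legacy INTx "pending bits array" read from the device via
 * vnic_intr_legacy_pba(); bit i set means interrupt source i has an event
 * pending.
 */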

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = ENIC_LEGACY_IO_INTR;
	unsigned int err_intr = ENIC_LEGACY_ERR_INTR;
	unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR;
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	if (enic_log_q_error(enic))
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}

static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, unsigned int len_left,
				  int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
				  struct sk_buff *skb, int vlan_tag_insert,
				  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
				  DMA_TO_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
			   vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
				     struct sk_buff *skb, int vlan_tag_insert,
				     unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
				  DMA_TO_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
				   hdr_len, vlan_tag_insert, vlan_tag, eop,
				   loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
{
	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);

	switch (eth->h_proto) {
	case ntohs(ETH_P_IP):
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr, 0,
					   IPPROTO_TCP, 0);
		break;
	case ntohs(ETH_P_IPV6):
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr, 0,
					 IPPROTO_TCP, 0);
		break;
	default:
		WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload");
		break;
	}
}

static void enic_preload_tcp_csum(struct sk_buff *skb)
{
	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}
}
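
/* A note on the preload above: for IPv4, tcp->check is seeded with the
 * complement of csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0), i.e.
 * the pseudo-header sum computed with a zero length.  While segmenting,
 * the NIC folds each segment's actual TCP length into that partial sum,
 * yielding a valid per-segment checksum without software touching the
 * individual segments.
 */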

static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
				 struct sk_buff *skb, unsigned int mss,
				 int vlan_tag_insert, unsigned int vlan_tag,
				 int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	int eop = (len_left == 0);
	unsigned int offset = 0;
	unsigned int hdr_len;
	dma_addr_t dma_addr;
	unsigned int len;
	skb_frag_t *frag;

	if (skb->encapsulation) {
		hdr_len = skb_inner_tcp_all_headers(skb);
		enic_preload_tcp_csum_encap(skb);
	} else {
		hdr_len = skb_tcp_all_headers(skb);
		enic_preload_tcp_csum(skb);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = dma_map_single(&enic->pdev->dev,
					  skb->data + offset, len,
					  DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						 (len == frag_len_left),/*EOP*/
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}

static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
					  struct sk_buff *skb,
					  int vlan_tag_insert,
					  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	/* Hardware will overwrite the checksum fields, calculating from
	 * scratch and ignoring the value placed by software.
	 * Offload mode = 00
	 * mss[2], mss[1], mss[0] bits are set
	 */
	unsigned int mss_or_csum = 7;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
				  DMA_TO_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
			      vlan_tag_insert, vlan_tag,
			      WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop,
			      loopback);
	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static inline int enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->encapsulation)
		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
					      vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* while not EOP of previous pkt && queue not empty.
		 * For all non EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
	return err;
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	if (enic_queue_wq_skb(enic, wq, skb))
		goto error;

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	skb_tx_timestamp(skb);
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

error:
	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
			   struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
	int err;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	eth_hw_addr_set(netdev, addr);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->tx_hang_reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For SR-IOV VFs, set the MAC in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	static const u8 zero_addr[ETH_ALEN] = {};
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_hw_addr_set(netdev, zero_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_hw_addr_set(netdev, zero_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
				  DMA_FROM_DEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}
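
/* Packets of ENIC_LARGE_PKT_THRESHOLD (1000) bytes or more count toward the
 * large-packet byte counter; everything else toward the small-packet one.
 * enic_calc_int_moderation() uses the ratio of the two to decide which
 * coalescing range start applies.
 */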

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
				DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}
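
/* Copybreak trades a small memcpy for DMA churn: frames no longer than
 * rx_copybreak (RX_COPYBREAK_DEFAULT, i.e. 256 bytes, by default) are
 * copied into a fresh skb, so the original buffer stays mapped and is
 * recycled by enic_rq_alloc_buf() without a new allocation.
 */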

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;
	bool outer_csum_ok = true, encap = false;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
					 buf->len, DMA_FROM_DEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
		    (type == 3)) {
			switch (rss_type) {
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
				break;
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
				break;
			}
		}
		if (enic->vxlan.vxlan_udp_port_number) {
			switch (enic->vxlan.patch_level) {
			case 0:
				if (fcoe) {
					encap = true;
					outer_csum_ok = fcoe_fc_crc_ok;
				}
				break;
			case 2:
				if ((type == 7) &&
				    (rss_hash & BIT(0))) {
					encap = true;
					outer_csum_ok = (rss_hash & BIT(1)) &&
							(rss_hash & BIT(2));
				}
				break;
			}
		}

		/* Hardware does not provide the whole packet checksum. It
		 * only provides a pseudo checksum. Since hw validates the
		 * packet checksum but does not provide us the checksum
		 * value, use CHECKSUM_UNNECESSARY.
		 *
		 * In case of encap pkt, tcp_udp_csum_ok is the inner
		 * csum_ok. outer_csum_ok is set by hw when the outer udp
		 * csum is correct or is zero.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
		    tcp_udp_csum_ok && outer_csum_ok &&
		    (ipv4_csum_ok || ipv6)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = encap;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (!(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);
	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
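
/* Worked example of the scaling above, with assumed sample numbers: given
 * delta = 1000 us and 125,000 bytes received, traffic <<= 3 gives 1,000,000
 * bits, and dividing by delta (in us) yields 1000 Mbps.  The table walk then
 * stops at the first row whose ceiling exceeds 1000 (the {4000, 0} row at
 * index 0), so the timer lands at 0% of the range, i.e. at range_start.
 */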

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = ENIC_LEGACY_IO_INTR;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[0]);

	if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[0]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	struct vnic_wq *wq = &enic->wq[wq_index];
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = wq->index;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);
	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
				 0 /* don't unmask intr */,
				 1 /* reset intr timer */);
	if (!wq_work_done) {
		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
		return 0;
	}

	return budget;
}

static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	/* Service RQ
	 */

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of an
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if ((work_done < budget) && napi_complete_done(napi, work_done)) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(struct timer_list *t)
{
	struct enic *enic = from_timer(enic, t, notify_timer);

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%s-rx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%s-tx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
				enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err, ret;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}
	enic_init_affinity_hint(enic);
	enic_set_affinity_hint(enic);

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		/* enable rq before updating rq desc */
		vnic_rq_enable(&enic->rq[i]);
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_timer_start(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++) {
		ret = vnic_rq_disable(&enic->rq[i]);
		if (!ret)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	}
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
2010	for (i = 0; i < enic->cq_count; i++)
2011		vnic_cq_clean(&enic->cq[i]);
2012	for (i = 0; i < enic->intr_count; i++)
2013		vnic_intr_clean(&enic->intr[i]);
2014
2015	return 0;
2016}
2017
2018static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
2019{
2020	bool running = netif_running(netdev);
2021	int err = 0;
2022
2023	ASSERT_RTNL();
2024	if (running) {
2025		err = enic_stop(netdev);
2026		if (err)
2027			return err;
2028	}
2029
2030	netdev->mtu = new_mtu;
2031
2032	if (running) {
2033		err = enic_open(netdev);
2034		if (err)
2035			return err;
2036	}
2037
2038	return 0;
2039}
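
/* A running interface is fully torn down and re-opened by the helper
 * above, so an MTU change (e.g. "ip link set eth0 mtu 9000", with
 * eth0 purely as an example name) drains and refills every RQ and WQ
 * ring.
 */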
2040
2041static int enic_change_mtu(struct net_device *netdev, int new_mtu)
2042{
2043	struct enic *enic = netdev_priv(netdev);
2044
2045	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2046		return -EOPNOTSUPP;
2047
2048	if (netdev->mtu > enic->port_mtu)
2049		netdev_warn(netdev,
2050			    "interface MTU (%d) set higher than port MTU (%d)\n",
2051			    netdev->mtu, enic->port_mtu);
2052
2053	return _enic_change_mtu(netdev, new_mtu);
2054}
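
/* Note that range checking is done by the core: dev_set_mtu() rejects
 * values outside [netdev->min_mtu, netdev->max_mtu] (68..9000, set in
 * enic_probe()) before this ndo_change_mtu handler is called.
 */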
2055
2056static void enic_change_mtu_work(struct work_struct *work)
2057{
2058	struct enic *enic = container_of(work, struct enic, change_mtu_work);
2059	struct net_device *netdev = enic->netdev;
2060	int new_mtu = vnic_dev_mtu(enic->vdev);
2061
2062	rtnl_lock();
2063	(void)_enic_change_mtu(netdev, new_mtu);
2064	rtnl_unlock();
2065
2066	netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
2067}
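
/* This work item is scheduled from enic_mtu_check() when the switch
 * side pushes a new port MTU to a dynamic or SR-IOV VF vNIC, so the
 * reconfiguration runs in process context where rtnl_lock() may
 * sleep.
 */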
2068
2069#ifdef CONFIG_NET_POLL_CONTROLLER
2070static void enic_poll_controller(struct net_device *netdev)
2071{
2072	struct enic *enic = netdev_priv(netdev);
2073	struct vnic_dev *vdev = enic->vdev;
2074	unsigned int i, intr;
2075
2076	switch (vnic_dev_get_intr_mode(vdev)) {
2077	case VNIC_DEV_INTR_MODE_MSIX:
2078		for (i = 0; i < enic->rq_count; i++) {
2079			intr = enic_msix_rq_intr(enic, i);
2080			enic_isr_msix(enic->msix_entry[intr].vector,
2081				      &enic->napi[i]);
2082		}
2083
2084		for (i = 0; i < enic->wq_count; i++) {
2085			intr = enic_msix_wq_intr(enic, i);
2086			enic_isr_msix(enic->msix_entry[intr].vector,
2087				      &enic->napi[enic_cq_wq(enic, i)]);
2088		}
2089
2090		break;
2091	case VNIC_DEV_INTR_MODE_MSI:
2092		enic_isr_msi(enic->pdev->irq, enic);
2093		break;
2094	case VNIC_DEV_INTR_MODE_INTX:
2095		enic_isr_legacy(enic->pdev->irq, netdev);
2096		break;
2097	default:
2098		break;
2099	}
2100}
2101#endif
2102
2103static int enic_dev_wait(struct vnic_dev *vdev,
2104	int (*start)(struct vnic_dev *, int),
2105	int (*finished)(struct vnic_dev *, int *),
2106	int arg)
2107{
2108	unsigned long time;
2109	int done;
2110	int err;
2111
2112	err = start(vdev, arg);
2113	if (err)
2114		return err;
2115
2116	/* Wait for func to complete...2 seconds max
2117	 */
2118
2119	time = jiffies + (HZ * 2);
2120	do {
2121
2122		err = finished(vdev, &done);
2123		if (err)
2124			return err;
2125
2126		if (done)
2127			return 0;
2128
2129		schedule_timeout_uninterruptible(HZ / 10);
2130
2131	} while (time_after(time, jiffies));
2132
2133	return -ETIMEDOUT;
2134}
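
/* The start/finished pair describes a devcmd that completes
 * asynchronously: start() posts the command and finished() reads back
 * a done flag.  With a 100 ms sleep per iteration (HZ / 10) and a two
 * second budget, the loop polls at most about 20 times before
 * returning -ETIMEDOUT.
 */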
2135
2136static int enic_dev_open(struct enic *enic)
2137{
2138	int err;
2139	u32 flags = CMD_OPENF_IG_DESCCACHE;
2140
2141	err = enic_dev_wait(enic->vdev, vnic_dev_open,
2142		vnic_dev_open_done, flags);
2143	if (err)
2144		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
2145			err);
2146
2147	return err;
2148}
2149
2150static int enic_dev_soft_reset(struct enic *enic)
2151{
2152	int err;
2153
2154	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
2155			    vnic_dev_soft_reset_done, 0);
2156	if (err)
2157		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
2158			   err);
2159
2160	return err;
2161}
2162
2163static int enic_dev_hang_reset(struct enic *enic)
2164{
2165	int err;
2166
2167	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
2168		vnic_dev_hang_reset_done, 0);
2169	if (err)
2170		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
2171			err);
2172
2173	return err;
2174}
2175
2176int __enic_set_rsskey(struct enic *enic)
2177{
2178	union vnic_rss_key *rss_key_buf_va;
2179	dma_addr_t rss_key_buf_pa;
2180	int i, kidx, bidx, err;
2181
2182	rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
2183					    sizeof(union vnic_rss_key),
2184					    &rss_key_buf_pa, GFP_ATOMIC);
2185	if (!rss_key_buf_va)
2186		return -ENOMEM;
2187
2188	for (i = 0; i < ENIC_RSS_LEN; i++) {
2189		kidx = i / ENIC_RSS_BYTES_PER_KEY;
2190		bidx = i % ENIC_RSS_BYTES_PER_KEY;
2191		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
2192	}
2193	spin_lock_bh(&enic->devcmd_lock);
2194	err = enic_set_rss_key(enic,
2195		rss_key_buf_pa,
2196		sizeof(union vnic_rss_key));
2197	spin_unlock_bh(&enic->devcmd_lock);
2198
2199	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
2200			  rss_key_buf_va, rss_key_buf_pa);
2201
2202	return err;
2203}
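
/* Key layout example: the ENIC_RSS_LEN secret key bytes are scattered
 * over vnic_rss_key.key[].b[] in ENIC_RSS_BYTES_PER_KEY sized chunks.
 * Assuming the usual 40-byte key split 10 bytes per entry, key byte
 * 25 lands in key[2].b[5].
 */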
2204
2205static int enic_set_rsskey(struct enic *enic)
2206{
2207	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);
2208
2209	return __enic_set_rsskey(enic);
2210}
2211
2212static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
2213{
2214	dma_addr_t rss_cpu_buf_pa;
2215	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
2216	unsigned int i;
2217	int err;
2218
2219	rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
2220					    sizeof(union vnic_rss_cpu),
2221					    &rss_cpu_buf_pa, GFP_ATOMIC);
2222	if (!rss_cpu_buf_va)
2223		return -ENOMEM;
2224
2225	for (i = 0; i < (1 << rss_hash_bits); i++)
2226		rss_cpu_buf_va->cpu[i / 4].b[i % 4] = i % enic->rq_count;
2227
2228	spin_lock_bh(&enic->devcmd_lock);
2229	err = enic_set_rss_cpu(enic,
2230		rss_cpu_buf_pa,
2231		sizeof(union vnic_rss_cpu));
2232	spin_unlock_bh(&enic->devcmd_lock);
2233
2234	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
2235			  rss_cpu_buf_va, rss_cpu_buf_pa);
2236
2237	return err;
2238}
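
/* Indirection table example: with rss_hash_bits = 7 (128 entries) and
 * four receive queues, the fill loop above writes 0,1,2,3,0,1,2,3,...
 * so hashed flows are spread round-robin across all RQs.
 */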
2239
2240static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
2241	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
2242{
2243	const u8 tso_ipid_split_en = 0;
2244	const u8 ig_vlan_strip_en = 1;
2245	int err;
2246
2247	/* Enable VLAN tag stripping.
2248	 */
2249
2250	spin_lock_bh(&enic->devcmd_lock);
2251	err = enic_set_nic_cfg(enic,
2252		rss_default_cpu, rss_hash_type,
2253		rss_hash_bits, rss_base_cpu,
2254		rss_enable, tso_ipid_split_en,
2255		ig_vlan_strip_en);
2256	spin_unlock_bh(&enic->devcmd_lock);
2257
2258	return err;
2259}
2260
2261static int enic_set_rss_nic_cfg(struct enic *enic)
2262{
2263	struct device *dev = enic_get_dev(enic);
2264	const u8 rss_default_cpu = 0;
2265	const u8 rss_hash_bits = 7;
2266	const u8 rss_base_cpu = 0;
2267	u8 rss_hash_type;
2268	int res;
2269	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
2270
2271	spin_lock_bh(&enic->devcmd_lock);
2272	res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
2273	spin_unlock_bh(&enic->devcmd_lock);
2274	if (res) {
2275		/* defaults for old adapters
2276		 */
2277		rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4	|
2278				NIC_CFG_RSS_HASH_TYPE_TCP_IPV4	|
2279				NIC_CFG_RSS_HASH_TYPE_IPV6	|
2280				NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
2281	}
2282
2283	if (rss_enable) {
2284		if (!enic_set_rsskey(enic)) {
2285			if (enic_set_rsscpu(enic, rss_hash_bits)) {
2286				rss_enable = 0;
2287				dev_warn(dev,
2288					 "RSS disabled, failed to set RSS cpu indirection table\n");
2289			}
2290		} else {
2291			rss_enable = 0;
2292			dev_warn(dev, "RSS disabled, failed to set RSS key\n");
2293		}
2294	}
2295
2296	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
2297		rss_hash_bits, rss_base_cpu, rss_enable);
2298}
2299
2300static void enic_set_api_busy(struct enic *enic, bool busy)
2301{
2302	spin_lock(&enic->enic_api_lock);
2303	enic->enic_api_busy = busy;
2304	spin_unlock(&enic->enic_api_lock);
2305}
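
/* The enic_api devcmd proxy path (used by the usNIC/RDMA driver)
 * takes enic_api_lock and backs off while enic_api_busy is set, so
 * the reset handlers below can reconfigure the device without a
 * proxied devcmd landing in the middle.
 */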
2306
2307static void enic_reset(struct work_struct *work)
2308{
2309	struct enic *enic = container_of(work, struct enic, reset);
2310
2311	if (!netif_running(enic->netdev))
2312		return;
2313
2314	rtnl_lock();
2315
2316	/* Stop any activity from infiniband */
2317	enic_set_api_busy(enic, true);
2318
2319	enic_stop(enic->netdev);
2320	enic_dev_soft_reset(enic);
2321	enic_reset_addr_lists(enic);
2322	enic_init_vnic_resources(enic);
2323	enic_set_rss_nic_cfg(enic);
2324	enic_dev_set_ig_vlan_rewrite_mode(enic);
2325	enic_open(enic->netdev);
2326
2327	/* Allow infiniband to fiddle with the device again */
2328	enic_set_api_busy(enic, false);
2329
2330	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
2331
2332	rtnl_unlock();
2333}
2334
2335static void enic_tx_hang_reset(struct work_struct *work)
2336{
2337	struct enic *enic = container_of(work, struct enic, tx_hang_reset);
2338
2339	rtnl_lock();
2340
2341	/* Stop any activity from infiniband */
2342	enic_set_api_busy(enic, true);
2343
2344	enic_dev_hang_notify(enic);
2345	enic_stop(enic->netdev);
2346	enic_dev_hang_reset(enic);
2347	enic_reset_addr_lists(enic);
2348	enic_init_vnic_resources(enic);
2349	enic_set_rss_nic_cfg(enic);
2350	enic_dev_set_ig_vlan_rewrite_mode(enic);
2351	enic_open(enic->netdev);
2352
2353	/* Allow infiniband to fiddle with the device again */
2354	enic_set_api_busy(enic, false);
2355
2356	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
2357
2358	rtnl_unlock();
2359}
2360
2361static int enic_set_intr_mode(struct enic *enic)
2362{
2363	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2364	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2365	unsigned int i;
2366
2367	/* Set interrupt mode (INTx, MSI, MSI-X) depending
2368	 * on system capabilities.
2369	 *
2370	 * Try MSI-X first
2371	 *
2372	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
2373	 * (the second to last INTR is used for WQ/RQ errors)
2374	 * (the last INTR is used for notifications)
2375	 */
2376
2377	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2378	for (i = 0; i < n + m + 2; i++)
2379		enic->msix_entry[i].entry = i;
2380
2381	/* Use multiple RQs if RSS is enabled
2382	 */
2383
2384	if (ENIC_SETTING(enic, RSS) &&
2385	    enic->config.intr_mode < 1 &&
2386	    enic->rq_count >= n &&
2387	    enic->wq_count >= m &&
2388	    enic->cq_count >= n + m &&
2389	    enic->intr_count >= n + m + 2) {
2390
2391		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2392					  n + m + 2, n + m + 2) > 0) {
2393
2394			enic->rq_count = n;
2395			enic->wq_count = m;
2396			enic->cq_count = n + m;
2397			enic->intr_count = n + m + 2;
2398
2399			vnic_dev_set_intr_mode(enic->vdev,
2400				VNIC_DEV_INTR_MODE_MSIX);
2401
2402			return 0;
2403		}
2404	}
2405
2406	if (enic->config.intr_mode < 1 &&
2407	    enic->rq_count >= 1 &&
2408	    enic->wq_count >= m &&
2409	    enic->cq_count >= 1 + m &&
2410	    enic->intr_count >= 1 + m + 2) {
2411		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2412					  1 + m + 2, 1 + m + 2) > 0) {
2413
2414			enic->rq_count = 1;
2415			enic->wq_count = m;
2416			enic->cq_count = 1 + m;
2417			enic->intr_count = 1 + m + 2;
2418
2419			vnic_dev_set_intr_mode(enic->vdev,
2420				VNIC_DEV_INTR_MODE_MSIX);
2421
2422			return 0;
2423		}
2424	}
2425
2426	/* Next try MSI
2427	 *
2428	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2429	 */
2430
2431	if (enic->config.intr_mode < 2 &&
2432	    enic->rq_count >= 1 &&
2433	    enic->wq_count >= 1 &&
2434	    enic->cq_count >= 2 &&
2435	    enic->intr_count >= 1 &&
2436	    !pci_enable_msi(enic->pdev)) {
2437
2438		enic->rq_count = 1;
2439		enic->wq_count = 1;
2440		enic->cq_count = 2;
2441		enic->intr_count = 1;
2442
2443		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2444
2445		return 0;
2446	}
2447
2448	/* Next try INTx
2449	 *
2450	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2451	 * (the first INTR is used for WQ/RQ)
2452	 * (the second INTR is used for WQ/RQ errors)
2453	 * (the last INTR is used for notifications)
2454	 */
2455
2456	if (enic->config.intr_mode < 3 &&
2457	    enic->rq_count >= 1 &&
2458	    enic->wq_count >= 1 &&
2459	    enic->cq_count >= 2 &&
2460	    enic->intr_count >= 3) {
2461
2462		enic->rq_count = 1;
2463		enic->wq_count = 1;
2464		enic->cq_count = 2;
2465		enic->intr_count = 3;
2466
2467		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2468
2469		return 0;
2470	}
2471
2472	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2473
2474	return -EINVAL;
2475}
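
/* Example MSI-X layout: with n = 4 RQs and m = 4 WQs the driver asks
 * for 10 vectors; per the usual enic vector convention, vectors 0-3
 * service RQ completions, 4-7 service WQ completions, vector 8 is the
 * WQ/RQ error interrupt and vector 9 is the notify interrupt.
 */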
2476
2477static void enic_clear_intr_mode(struct enic *enic)
2478{
2479	switch (vnic_dev_get_intr_mode(enic->vdev)) {
2480	case VNIC_DEV_INTR_MODE_MSIX:
2481		pci_disable_msix(enic->pdev);
2482		break;
2483	case VNIC_DEV_INTR_MODE_MSI:
2484		pci_disable_msi(enic->pdev);
2485		break;
2486	default:
2487		break;
2488	}
2489
2490	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2491}
2492
2493static const struct net_device_ops enic_netdev_dynamic_ops = {
2494	.ndo_open		= enic_open,
2495	.ndo_stop		= enic_stop,
2496	.ndo_start_xmit		= enic_hard_start_xmit,
2497	.ndo_get_stats64	= enic_get_stats,
2498	.ndo_validate_addr	= eth_validate_addr,
2499	.ndo_set_rx_mode	= enic_set_rx_mode,
2500	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
2501	.ndo_change_mtu		= enic_change_mtu,
2502	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
2503	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
2504	.ndo_tx_timeout		= enic_tx_timeout,
2505	.ndo_set_vf_port	= enic_set_vf_port,
2506	.ndo_get_vf_port	= enic_get_vf_port,
2507	.ndo_set_vf_mac		= enic_set_vf_mac,
2508#ifdef CONFIG_NET_POLL_CONTROLLER
2509	.ndo_poll_controller	= enic_poll_controller,
2510#endif
2511#ifdef CONFIG_RFS_ACCEL
2512	.ndo_rx_flow_steer	= enic_rx_flow_steer,
2513#endif
2514	.ndo_features_check	= enic_features_check,
2515};
2516
2517static const struct net_device_ops enic_netdev_ops = {
2518	.ndo_open		= enic_open,
2519	.ndo_stop		= enic_stop,
2520	.ndo_start_xmit		= enic_hard_start_xmit,
2521	.ndo_get_stats64	= enic_get_stats,
2522	.ndo_validate_addr	= eth_validate_addr,
2523	.ndo_set_mac_address	= enic_set_mac_address,
2524	.ndo_set_rx_mode	= enic_set_rx_mode,
2525	.ndo_change_mtu		= enic_change_mtu,
2526	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
2527	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
2528	.ndo_tx_timeout		= enic_tx_timeout,
2529	.ndo_set_vf_port	= enic_set_vf_port,
2530	.ndo_get_vf_port	= enic_get_vf_port,
2531	.ndo_set_vf_mac		= enic_set_vf_mac,
2532#ifdef CONFIG_NET_POLL_CONTROLLER
2533	.ndo_poll_controller	= enic_poll_controller,
2534#endif
2535#ifdef CONFIG_RFS_ACCEL
2536	.ndo_rx_flow_steer	= enic_rx_flow_steer,
2537#endif
2538	.ndo_features_check	= enic_features_check,
2539};
2540
2541static void enic_dev_deinit(struct enic *enic)
2542{
2543	unsigned int i;
2544
2545	for (i = 0; i < enic->rq_count; i++)
2546		__netif_napi_del(&enic->napi[i]);
2547
2548	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
2549		for (i = 0; i < enic->wq_count; i++)
2550			__netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
2551
2552	/* observe RCU grace period after __netif_napi_del() calls */
2553	synchronize_net();
2554
2555	enic_free_vnic_resources(enic);
2556	enic_clear_intr_mode(enic);
2557	enic_free_affinity_hint(enic);
2558}
2559
2560static void enic_kdump_kernel_config(struct enic *enic)
2561{
2562	if (is_kdump_kernel()) {
2563		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
2564		enic->rq_count = 1;
2565		enic->wq_count = 1;
2566		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
2567		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
2568		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
2569	}
2570}
2571
2572static int enic_dev_init(struct enic *enic)
2573{
2574	struct device *dev = enic_get_dev(enic);
2575	struct net_device *netdev = enic->netdev;
2576	unsigned int i;
2577	int err;
2578
2579	/* Get interrupt coalesce timer info */
2580	err = enic_dev_intr_coal_timer_info(enic);
2581	if (err) {
2582		dev_warn(dev, "Using default conversion factor for "
2583			"interrupt coalesce timer\n");
2584		vnic_dev_intr_coal_timer_info_default(enic->vdev);
2585	}
2586
2587	/* Get vNIC configuration
2588	 */
2589
2590	err = enic_get_vnic_config(enic);
2591	if (err) {
2592		dev_err(dev, "Get vNIC configuration failed, aborting\n");
2593		return err;
2594	}
2595
2596	/* Get available resource counts
2597	 */
2598
2599	enic_get_res_counts(enic);
2600
2601	/* Modify resource counts if we are running in a kdump kernel
2602	 */
2603	enic_kdump_kernel_config(enic);
2604
2605	/* Set interrupt mode based on resource counts and system
2606	 * capabilities
2607	 */
2608
2609	err = enic_set_intr_mode(enic);
2610	if (err) {
2611		dev_err(dev, "Failed to set intr mode based on resource "
2612			"counts and system capabilities, aborting\n");
2613		return err;
2614	}
2615
2616	/* Allocate and configure vNIC resources
2617	 */
2618
2619	err = enic_alloc_vnic_resources(enic);
2620	if (err) {
2621		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
2622		goto err_out_free_vnic_resources;
2623	}
2624
2625	enic_init_vnic_resources(enic);
2626
2627	err = enic_set_rss_nic_cfg(enic);
2628	if (err) {
2629		dev_err(dev, "Failed to config nic, aborting\n");
2630		goto err_out_free_vnic_resources;
2631	}
2632
2633	switch (vnic_dev_get_intr_mode(enic->vdev)) {
2634	default:
2635		netif_napi_add(netdev, &enic->napi[0], enic_poll);
2636		break;
2637	case VNIC_DEV_INTR_MODE_MSIX:
2638		for (i = 0; i < enic->rq_count; i++) {
2639			netif_napi_add(netdev, &enic->napi[i],
2640				       enic_poll_msix_rq);
2641		}
2642		for (i = 0; i < enic->wq_count; i++)
2643			netif_napi_add(netdev,
2644				       &enic->napi[enic_cq_wq(enic, i)],
2645				       enic_poll_msix_wq);
2646		break;
2647	}
2648
2649	return 0;
2650
2651err_out_free_vnic_resources:
2652	enic_free_affinity_hint(enic);
2653	enic_clear_intr_mode(enic);
2654	enic_free_vnic_resources(enic);
2655
2656	return err;
2657}
2658
2659static void enic_iounmap(struct enic *enic)
2660{
2661	unsigned int i;
2662
2663	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2664		if (enic->bar[i].vaddr)
2665			iounmap(enic->bar[i].vaddr);
2666}
2667
2668static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2669{
2670	struct device *dev = &pdev->dev;
2671	struct net_device *netdev;
2672	struct enic *enic;
2673	int using_dac = 0;
2674	unsigned int i;
2675	int err;
2676#ifdef CONFIG_PCI_IOV
2677	int pos = 0;
2678#endif
2679	int num_pps = 1;
2680
2681	/* Allocate net device structure and initialize.  Private
2682	 * instance data is initialized to zero.
2683	 */
2684
2685	netdev = alloc_etherdev_mqs(sizeof(struct enic),
2686				    ENIC_RQ_MAX, ENIC_WQ_MAX);
2687	if (!netdev)
2688		return -ENOMEM;
2689
2690	pci_set_drvdata(pdev, netdev);
2691
2692	SET_NETDEV_DEV(netdev, &pdev->dev);
2693
2694	enic = netdev_priv(netdev);
2695	enic->netdev = netdev;
2696	enic->pdev = pdev;
2697
2698	/* Setup PCI resources
2699	 */
2700
2701	err = pci_enable_device_mem(pdev);
2702	if (err) {
2703		dev_err(dev, "Cannot enable PCI device, aborting\n");
2704		goto err_out_free_netdev;
2705	}
2706
2707	err = pci_request_regions(pdev, DRV_NAME);
2708	if (err) {
2709		dev_err(dev, "Cannot request PCI regions, aborting\n");
2710		goto err_out_disable_device;
2711	}
2712
2713	pci_set_master(pdev);
2714
2715	/* Query PCI controller on system for DMA addressing
2716	 * limitation for the device.  Try 47-bit first, and
2717	 * fall back to 32-bit.
2718	 */
2719
2720	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
2721	if (err) {
2722		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2723		if (err) {
2724			dev_err(dev, "No usable DMA configuration, aborting\n");
2725			goto err_out_release_regions;
2726		}
2727	} else {
2728		using_dac = 1;
2729	}
2730
2731	/* Map vNIC resources from BAR0-5
2732	 */
2733
2734	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2735		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2736			continue;
2737		enic->bar[i].len = pci_resource_len(pdev, i);
2738		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2739		if (!enic->bar[i].vaddr) {
2740			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
2741			err = -ENODEV;
2742			goto err_out_iounmap;
2743		}
2744		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
2745	}
2746
2747	/* Register vNIC device
2748	 */
2749
2750	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2751		ARRAY_SIZE(enic->bar));
2752	if (!enic->vdev) {
2753		dev_err(dev, "vNIC registration failed, aborting\n");
2754		err = -ENODEV;
2755		goto err_out_iounmap;
2756	}
2757
2758	err = vnic_devcmd_init(enic->vdev);
2759
2760	if (err)
2761		goto err_out_vnic_unregister;
2762
2763#ifdef CONFIG_PCI_IOV
2764	/* Get number of subvnics */
2765	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
2766	if (pos) {
2767		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
2768			&enic->num_vfs);
2769		if (enic->num_vfs) {
2770			err = pci_enable_sriov(pdev, enic->num_vfs);
2771			if (err) {
2772				dev_err(dev, "SRIOV enable failed, aborting."
2773					" pci_enable_sriov() returned %d\n",
2774					err);
2775				goto err_out_vnic_unregister;
2776			}
2777			enic->priv_flags |= ENIC_SRIOV_ENABLED;
2778			num_pps = enic->num_vfs;
2779		}
2780	}
2781#endif
2782
2783	/* Allocate structure for port profiles */
2784	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
2785	if (!enic->pp) {
2786		err = -ENOMEM;
2787		goto err_out_disable_sriov_pp;
2788	}
2789
2790	/* Issue device open to get device in known state
2791	 */
2792
2793	err = enic_dev_open(enic);
2794	if (err) {
2795		dev_err(dev, "vNIC dev open failed, aborting\n");
2796		goto err_out_disable_sriov;
2797	}
2798
2799	/* Setup devcmd lock
2800	 */
2801
2802	spin_lock_init(&enic->devcmd_lock);
2803	spin_lock_init(&enic->enic_api_lock);
2804
2805	/*
2806	 * Set ingress vlan rewrite mode before vnic initialization
2807	 */
2808
2809	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2810	if (err) {
2811		dev_err(dev,
2812			"Failed to set ingress vlan rewrite mode, aborting.\n");
2813		goto err_out_dev_close;
2814	}
2815
2816	/* Issue device init to initialize the vnic-to-switch link.
2817	 * We'll start with carrier off and wait for link UP
2818	 * notification later to turn on carrier.  We don't need
2819	 * to wait here for the vnic-to-switch link initialization
2820	 * to complete; link UP notification is the indication that
2821	 * the process is complete.
2822	 */
2823
2824	netif_carrier_off(netdev);
2825
2826	/* Do not call dev_init for a dynamic vnic.
2827	 * For a dynamic vnic, init_prov_info will be
2828	 * called later by an upper layer.
2829	 */
2830
2831	if (!enic_is_dynamic(enic)) {
2832		err = vnic_dev_init(enic->vdev, 0);
2833		if (err) {
2834			dev_err(dev, "vNIC dev init failed, aborting\n");
2835			goto err_out_dev_close;
2836		}
2837	}
2838
2839	err = enic_dev_init(enic);
2840	if (err) {
2841		dev_err(dev, "Device initialization failed, aborting\n");
2842		goto err_out_dev_close;
2843	}
2844
2845	netif_set_real_num_tx_queues(netdev, enic->wq_count);
2846	netif_set_real_num_rx_queues(netdev, enic->rq_count);
2847
2848	/* Setup notification timer, HW reset task, and wq locks
2849	 */
2850
2851	timer_setup(&enic->notify_timer, enic_notify_timer, 0);
2852
2853	enic_rfs_flw_tbl_init(enic);
2854	enic_set_rx_coal_setting(enic);
2855	INIT_WORK(&enic->reset, enic_reset);
2856	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
2857	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
2858
2859	for (i = 0; i < enic->wq_count; i++)
2860		spin_lock_init(&enic->wq_lock[i]);
2861
2862	/* Register net device
2863	 */
2864
2865	enic->port_mtu = enic->config.mtu;
2866
2867	err = enic_set_mac_addr(netdev, enic->mac_addr);
2868	if (err) {
2869		dev_err(dev, "Invalid MAC address, aborting\n");
2870		goto err_out_dev_deinit;
2871	}
2872
2873	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2874	/* The rx coalesce time was already initialized; it is used if
2875	 * adaptive coalescing is turned off
2876	 */
2877	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2878
2879	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2880		netdev->netdev_ops = &enic_netdev_dynamic_ops;
2881	else
2882		netdev->netdev_ops = &enic_netdev_ops;
2883
2884	netdev->watchdog_timeo = 2 * HZ;
2885	enic_set_ethtool_ops(netdev);
2886
2887	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2888	if (ENIC_SETTING(enic, LOOP)) {
2889		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2890		enic->loop_enable = 1;
2891		enic->loop_tag = enic->config.loop_tag;
2892		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2893	}
2894	if (ENIC_SETTING(enic, TXCSUM))
2895		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2896	if (ENIC_SETTING(enic, TSO))
2897		netdev->hw_features |= NETIF_F_TSO |
2898			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2899	if (ENIC_SETTING(enic, RSS))
2900		netdev->hw_features |= NETIF_F_RXHASH;
2901	if (ENIC_SETTING(enic, RXCSUM))
2902		netdev->hw_features |= NETIF_F_RXCSUM;
2903	if (ENIC_SETTING(enic, VXLAN)) {
2904		u64 patch_level;
2905		u64 a1 = 0;
2906
2907		netdev->hw_enc_features |= NETIF_F_RXCSUM		|
2908					   NETIF_F_TSO			|
2909					   NETIF_F_TSO6			|
2910					   NETIF_F_TSO_ECN		|
2911					   NETIF_F_GSO_UDP_TUNNEL	|
2912					   NETIF_F_HW_CSUM		|
2913					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
2914		netdev->hw_features |= netdev->hw_enc_features;
2915		/* get bit mask from hw about supported offload bit level
2916		 * BIT(0) = fw supports patch_level 0
2917		 *	    fcoe bit = encap
2918		 *	    fcoe_fc_crc_ok = outer csum ok
2919		 * BIT(1) = always set by fw
2920		 * BIT(2) = fw supports patch_level 2
2921		 *	    BIT(0) in rss_hash = encap
2922		 *	    BIT(1,2) in rss_hash = outer_ip_csum_ok/
2923		 *				   outer_tcp_csum_ok
2924		 * used in enic_rq_indicate_buf
2925		 */
2926		err = vnic_dev_get_supported_feature_ver(enic->vdev,
2927							 VIC_FEATURE_VXLAN,
2928							 &patch_level, &a1);
2929		if (err)
2930			patch_level = 0;
2931		enic->vxlan.flags = (u8)a1;
2932		/* mask bits that are supported by driver
2933		 */
2934		patch_level &= BIT_ULL(0) | BIT_ULL(2);
2935		patch_level = fls(patch_level);
2936		patch_level = patch_level ? patch_level - 1 : 0;
2937		enic->vxlan.patch_level = patch_level;
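		/* Example: a fw advertising BIT(0)|BIT(1)|BIT(2) (0x7) is
		 * masked to 0x5, fls() returns 3 and patch_level becomes 2;
		 * a fw advertising only the always-set BIT(1) masks to 0,
		 * so patch_level falls back to 0.
		 */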
2938
2939		if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
2940		    enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
2941			netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
2942			if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
2943				netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
2944		}
2945	}
2946
2947	netdev->features |= netdev->hw_features;
2948	netdev->vlan_features |= netdev->features;
2949
2950#ifdef CONFIG_RFS_ACCEL
2951	netdev->hw_features |= NETIF_F_NTUPLE;
2952#endif
2953
2954	if (using_dac)
2955		netdev->features |= NETIF_F_HIGHDMA;
2956
2957	netdev->priv_flags |= IFF_UNICAST_FLT;
2958
2959	/* MTU range: 68 - 9000 */
2960	netdev->min_mtu = ENIC_MIN_MTU;
2961	netdev->max_mtu = ENIC_MAX_MTU;
2962	netdev->mtu	= enic->port_mtu;
2963
2964	err = register_netdev(netdev);
2965	if (err) {
2966		dev_err(dev, "Cannot register net device, aborting\n");
2967		goto err_out_dev_deinit;
2968	}
2969	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
2970
2971	return 0;
2972
2973err_out_dev_deinit:
2974	enic_dev_deinit(enic);
2975err_out_dev_close:
2976	vnic_dev_close(enic->vdev);
2977err_out_disable_sriov:
2978	kfree(enic->pp);
2979err_out_disable_sriov_pp:
2980#ifdef CONFIG_PCI_IOV
2981	if (enic_sriov_enabled(enic)) {
2982		pci_disable_sriov(pdev);
2983		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2984	}
2985#endif
2986err_out_vnic_unregister:
2987	vnic_dev_unregister(enic->vdev);
2988err_out_iounmap:
2989	enic_iounmap(enic);
2990err_out_release_regions:
2991	pci_release_regions(pdev);
2992err_out_disable_device:
2993	pci_disable_device(pdev);
2994err_out_free_netdev:
2995	free_netdev(netdev);
2996
2997	return err;
2998}
2999
3000static void enic_remove(struct pci_dev *pdev)
3001{
3002	struct net_device *netdev = pci_get_drvdata(pdev);
3003
3004	if (netdev) {
3005		struct enic *enic = netdev_priv(netdev);
3006
3007		cancel_work_sync(&enic->reset);
3008		cancel_work_sync(&enic->change_mtu_work);
3009		unregister_netdev(netdev);
3010		enic_dev_deinit(enic);
3011		vnic_dev_close(enic->vdev);
3012#ifdef CONFIG_PCI_IOV
3013		if (enic_sriov_enabled(enic)) {
3014			pci_disable_sriov(pdev);
3015			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
3016		}
3017#endif
3018		kfree(enic->pp);
3019		vnic_dev_unregister(enic->vdev);
3020		enic_iounmap(enic);
3021		pci_release_regions(pdev);
3022		pci_disable_device(pdev);
3023		free_netdev(netdev);
3024	}
3025}
3026
3027static struct pci_driver enic_driver = {
3028	.name = DRV_NAME,
3029	.id_table = enic_id_table,
3030	.probe = enic_probe,
3031	.remove = enic_remove,
3032};
3033
3034module_pci_driver(enic_driver);
v6.9.4
   1/*
   2 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
   3 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
   4 *
   5 * This program is free software; you may redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; version 2 of the License.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  16 * SOFTWARE.
  17 *
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/kernel.h>
  22#include <linux/string.h>
  23#include <linux/errno.h>
  24#include <linux/types.h>
  25#include <linux/init.h>
  26#include <linux/interrupt.h>
  27#include <linux/workqueue.h>
  28#include <linux/pci.h>
  29#include <linux/netdevice.h>
  30#include <linux/etherdevice.h>
  31#include <linux/if.h>
  32#include <linux/if_ether.h>
  33#include <linux/if_vlan.h>
  34#include <linux/in.h>
  35#include <linux/ip.h>
  36#include <linux/ipv6.h>
  37#include <linux/tcp.h>
  38#include <linux/rtnetlink.h>
  39#include <linux/prefetch.h>
  40#include <net/ip6_checksum.h>
  41#include <linux/ktime.h>
  42#include <linux/numa.h>
  43#ifdef CONFIG_RFS_ACCEL
  44#include <linux/cpu_rmap.h>
  45#endif
  46#include <linux/crash_dump.h>
  47#include <net/busy_poll.h>
  48#include <net/vxlan.h>
  49
  50#include "cq_enet_desc.h"
  51#include "vnic_dev.h"
  52#include "vnic_intr.h"
  53#include "vnic_stats.h"
  54#include "vnic_vic.h"
  55#include "enic_res.h"
  56#include "enic.h"
  57#include "enic_dev.h"
  58#include "enic_pp.h"
  59#include "enic_clsf.h"
  60
  61#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
  62#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
  63#define MAX_TSO				(1 << 16)
  64#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
  65
  66#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
  67#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
  68#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */
  69
  70#define RX_COPYBREAK_DEFAULT		256
  71
  72/* Supported devices */
  73static const struct pci_device_id enic_id_table[] = {
  74	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
  75	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
  76	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
  77	{ 0, }	/* end of table */
  78};
  79
  80MODULE_DESCRIPTION(DRV_DESCRIPTION);
  81MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
  82MODULE_LICENSE("GPL");
  83MODULE_DEVICE_TABLE(pci, enic_id_table);
  84
  85#define ENIC_LARGE_PKT_THRESHOLD		1000
  86#define ENIC_MAX_COALESCE_TIMERS		10
  87/*  Interrupt moderation table, which will be used to decide the
  88 *  coalescing timer values
  89 *  {rx_rate in Mbps, mapping percentage of the range}
  90 */
  91static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
  92	{4000,  0},
  93	{4400, 10},
  94	{5060, 20},
  95	{5230, 30},
  96	{5540, 40},
  97	{5820, 50},
  98	{6120, 60},
  99	{6435, 70},
 100	{6745, 80},
 101	{7000, 90},
 102	{0xFFFFFFFF, 100}
 103};
 104
 105/* This table helps the driver to pick different ranges for rx coalescing
 106 * timer depending on the link speed.
 107 */
 108static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
 109	{0,  0}, /* 0  - 4  Gbps */
 110	{0,  3}, /* 4  - 10 Gbps */
 111	{3,  6}, /* 10 - 40 Gbps */
 112};
 113
 114static void enic_init_affinity_hint(struct enic *enic)
 115{
 116	int numa_node = dev_to_node(&enic->pdev->dev);
 117	int i;
 118
 119	for (i = 0; i < enic->intr_count; i++) {
 120		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
 121		    (cpumask_available(enic->msix[i].affinity_mask) &&
 122		     !cpumask_empty(enic->msix[i].affinity_mask)))
 123			continue;
 124		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
 125				       GFP_KERNEL))
 126			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
 127					enic->msix[i].affinity_mask);
 128	}
 129}
 130
 131static void enic_free_affinity_hint(struct enic *enic)
 132{
 133	int i;
 134
 135	for (i = 0; i < enic->intr_count; i++) {
 136		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
 137			continue;
 138		free_cpumask_var(enic->msix[i].affinity_mask);
 139	}
 140}
 141
 142static void enic_set_affinity_hint(struct enic *enic)
 143{
 144	int i;
 145	int err;
 146
 147	for (i = 0; i < enic->intr_count; i++) {
 148		if (enic_is_err_intr(enic, i)		||
 149		    enic_is_notify_intr(enic, i)	||
 150		    !cpumask_available(enic->msix[i].affinity_mask) ||
 151		    cpumask_empty(enic->msix[i].affinity_mask))
 152			continue;
 153		err = irq_update_affinity_hint(enic->msix_entry[i].vector,
 154					       enic->msix[i].affinity_mask);
 155		if (err)
 156			netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
 157				    err);
 158	}
 159
 160	for (i = 0; i < enic->wq_count; i++) {
 161		int wq_intr = enic_msix_wq_intr(enic, i);
 162
 163		if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
 164		    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
 165			netif_set_xps_queue(enic->netdev,
 166					    enic->msix[wq_intr].affinity_mask,
 167					    i);
 168	}
 169}
 170
 171static void enic_unset_affinity_hint(struct enic *enic)
 172{
 173	int i;
 174
 175	for (i = 0; i < enic->intr_count; i++)
 176		irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
 177}
 178
 179static int enic_udp_tunnel_set_port(struct net_device *netdev,
 180				    unsigned int table, unsigned int entry,
 181				    struct udp_tunnel_info *ti)
 182{
 183	struct enic *enic = netdev_priv(netdev);
 184	int err;
 185
 186	spin_lock_bh(&enic->devcmd_lock);
 187
 188	err = vnic_dev_overlay_offload_cfg(enic->vdev,
 189					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
 190					   ntohs(ti->port));
 191	if (err)
 192		goto error;
 193
 194	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
 195					    enic->vxlan.patch_level);
 196	if (err)
 197		goto error;
 198
 199	enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
 200error:
 201	spin_unlock_bh(&enic->devcmd_lock);
 202
 203	return err;
 204}
 205
 206static int enic_udp_tunnel_unset_port(struct net_device *netdev,
 207				      unsigned int table, unsigned int entry,
 208				      struct udp_tunnel_info *ti)
 209{
 210	struct enic *enic = netdev_priv(netdev);
 211	int err;
 212
 213	spin_lock_bh(&enic->devcmd_lock);
 214
 215	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
 216					    OVERLAY_OFFLOAD_DISABLE);
 217	if (err)
 218		goto unlock;
 219
 220	enic->vxlan.vxlan_udp_port_number = 0;
 221
 222unlock:
 223	spin_unlock_bh(&enic->devcmd_lock);
 224
 225	return err;
 226}
 227
 228static const struct udp_tunnel_nic_info enic_udp_tunnels = {
 229	.set_port	= enic_udp_tunnel_set_port,
 230	.unset_port	= enic_udp_tunnel_unset_port,
 231	.tables		= {
 232		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
 233	},
 234}, enic_udp_tunnels_v4 = {
 235	.set_port	= enic_udp_tunnel_set_port,
 236	.unset_port	= enic_udp_tunnel_unset_port,
 237	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
 238	.tables		= {
 239		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
 240	},
 241};
 242
 243static netdev_features_t enic_features_check(struct sk_buff *skb,
 244					     struct net_device *dev,
 245					     netdev_features_t features)
 246{
 247	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
 248	struct enic *enic = netdev_priv(dev);
 249	struct udphdr *udph;
 250	u16 port = 0;
 251	u8 proto;
 252
 253	if (!skb->encapsulation)
 254		return features;
 255
 256	features = vxlan_features_check(skb, features);
 257
 258	switch (vlan_get_protocol(skb)) {
 259	case htons(ETH_P_IPV6):
 260		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
 261			goto out;
 262		proto = ipv6_hdr(skb)->nexthdr;
 263		break;
 264	case htons(ETH_P_IP):
 265		proto = ip_hdr(skb)->protocol;
 266		break;
 267	default:
 268		goto out;
 269	}
 270
 271	switch (eth->h_proto) {
 272	case ntohs(ETH_P_IPV6):
 273		if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
 274			goto out;
 275		fallthrough;
 276	case ntohs(ETH_P_IP):
 277		break;
 278	default:
 279		goto out;
 280	}
 281
 282
 283	if (proto == IPPROTO_UDP) {
 284		udph = udp_hdr(skb);
 285		port = be16_to_cpu(udph->dest);
 286	}
 287
 288	/* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
 289	 * for other UDP port tunnels
 290	 */
 291	if (port  != enic->vxlan.vxlan_udp_port_number)
 292		goto out;
 293
 294	return features;
 295
 296out:
 297	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 298}
 299
 300int enic_is_dynamic(struct enic *enic)
 301{
 302	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
 303}
 304
 305int enic_sriov_enabled(struct enic *enic)
 306{
 307	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
 308}
 309
 310static int enic_is_sriov_vf(struct enic *enic)
 311{
 312	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
 313}
 314
 315int enic_is_valid_vf(struct enic *enic, int vf)
 316{
 317#ifdef CONFIG_PCI_IOV
 318	return vf >= 0 && vf < enic->num_vfs;
 319#else
 320	return 0;
 321#endif
 322}
 323
 324static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 325{
 326	struct enic *enic = vnic_dev_priv(wq->vdev);
 327
 328	if (buf->sop)
 329		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
 330				 DMA_TO_DEVICE);
 331	else
 332		dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
 333			       DMA_TO_DEVICE);
 334
 335	if (buf->os_buf)
 336		dev_kfree_skb_any(buf->os_buf);
 337}
 338
 339static void enic_wq_free_buf(struct vnic_wq *wq,
 340	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
 341{
 342	enic_free_wq_buf(wq, buf);
 343}
 344
 345static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 346	u8 type, u16 q_number, u16 completed_index, void *opaque)
 347{
 348	struct enic *enic = vnic_dev_priv(vdev);
 349
 350	spin_lock(&enic->wq_lock[q_number]);
 351
 352	vnic_wq_service(&enic->wq[q_number], cq_desc,
 353		completed_index, enic_wq_free_buf,
 354		opaque);
 355
 356	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
 357	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
 358	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
 359		netif_wake_subqueue(enic->netdev, q_number);
 360
 361	spin_unlock(&enic->wq_lock[q_number]);
 362
 363	return 0;
 364}
 365
 366static bool enic_log_q_error(struct enic *enic)
 367{
 368	unsigned int i;
 369	u32 error_status;
 370	bool err = false;
 371
 372	for (i = 0; i < enic->wq_count; i++) {
 373		error_status = vnic_wq_error_status(&enic->wq[i]);
 374		err |= error_status;
 375		if (error_status)
 376			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
 377				i, error_status);
 378	}
 379
 380	for (i = 0; i < enic->rq_count; i++) {
 381		error_status = vnic_rq_error_status(&enic->rq[i]);
 382		err |= error_status;
 383		if (error_status)
 384			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
 385				i, error_status);
 386	}
 387
 388	return err;
 389}
 390
 391static void enic_msglvl_check(struct enic *enic)
 392{
 393	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
 394
 395	if (msg_enable != enic->msg_enable) {
 396		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
 397			enic->msg_enable, msg_enable);
 398		enic->msg_enable = msg_enable;
 399	}
 400}
 401
 402static void enic_mtu_check(struct enic *enic)
 403{
 404	u32 mtu = vnic_dev_mtu(enic->vdev);
 405	struct net_device *netdev = enic->netdev;
 406
 407	if (mtu && mtu != enic->port_mtu) {
 408		enic->port_mtu = mtu;
 409		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
 410			mtu = max_t(int, ENIC_MIN_MTU,
 411				min_t(int, ENIC_MAX_MTU, mtu));
 412			if (mtu != netdev->mtu)
 413				schedule_work(&enic->change_mtu_work);
 414		} else {
 415			if (mtu < netdev->mtu)
 416				netdev_warn(netdev,
 417					"interface MTU (%d) set higher "
 418					"than switch port MTU (%d)\n",
 419					netdev->mtu, mtu);
 420		}
 421	}
 422}
 423
 424static void enic_link_check(struct enic *enic)
 425{
 426	int link_status = vnic_dev_link_status(enic->vdev);
 427	int carrier_ok = netif_carrier_ok(enic->netdev);
 428
 429	if (link_status && !carrier_ok) {
 430		netdev_info(enic->netdev, "Link UP\n");
 431		netif_carrier_on(enic->netdev);
 432	} else if (!link_status && carrier_ok) {
 433		netdev_info(enic->netdev, "Link DOWN\n");
 434		netif_carrier_off(enic->netdev);
 435	}
 436}
 437
 438static void enic_notify_check(struct enic *enic)
 439{
 440	enic_msglvl_check(enic);
 441	enic_mtu_check(enic);
 442	enic_link_check(enic);
 443}
 444
 445#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
 446
 447static irqreturn_t enic_isr_legacy(int irq, void *data)
 448{
 449	struct net_device *netdev = data;
 450	struct enic *enic = netdev_priv(netdev);
 451	unsigned int io_intr = ENIC_LEGACY_IO_INTR;
 452	unsigned int err_intr = ENIC_LEGACY_ERR_INTR;
 453	unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR;
 454	u32 pba;
 455
 456	vnic_intr_mask(&enic->intr[io_intr]);
 457
 458	pba = vnic_intr_legacy_pba(enic->legacy_pba);
 459	if (!pba) {
 460		vnic_intr_unmask(&enic->intr[io_intr]);
 461		return IRQ_NONE;	/* not our interrupt */
 462	}
 463
 464	if (ENIC_TEST_INTR(pba, notify_intr)) {
 465		enic_notify_check(enic);
 466		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
 467	}
 468
 469	if (ENIC_TEST_INTR(pba, err_intr)) {
 470		vnic_intr_return_all_credits(&enic->intr[err_intr]);
 471		enic_log_q_error(enic);
 472		/* schedule recovery from WQ/RQ error */
 473		schedule_work(&enic->reset);
 474		return IRQ_HANDLED;
 475	}
 476
 477	if (ENIC_TEST_INTR(pba, io_intr))
 478		napi_schedule_irqoff(&enic->napi[0]);
 479	else
 480		vnic_intr_unmask(&enic->intr[io_intr]);
 481
 482	return IRQ_HANDLED;
 483}
 484
 485static irqreturn_t enic_isr_msi(int irq, void *data)
 486{
 487	struct enic *enic = data;
 488
 489	/* With MSI, there is no sharing of interrupts, so this is
 490	 * our interrupt and there is no need to ack it.  The device
 491	 * is not providing per-vector masking, so the OS will not
 492	 * write to PCI config space to mask/unmask the interrupt.
 493	 * We're using mask_on_assertion for MSI, so the device
 494	 * automatically masks the interrupt when the interrupt is
 495	 * generated.  Later, when exiting polling, the interrupt
 496	 * will be unmasked (see enic_poll).
 497	 *
 498	 * Also, the device uses the same PCIe Traffic Class (TC)
 499	 * for Memory Write data and MSI, so there are no ordering
 500	 * issues; the MSI will always arrive at the Root Complex
 501	 * _after_ corresponding Memory Writes (i.e. descriptor
 502	 * writes).
 503	 */
 504
 505	napi_schedule_irqoff(&enic->napi[0]);
 506
 507	return IRQ_HANDLED;
 508}
 509
 510static irqreturn_t enic_isr_msix(int irq, void *data)
 511{
 512	struct napi_struct *napi = data;
 513
 514	napi_schedule_irqoff(napi);
 515
 516	return IRQ_HANDLED;
 517}
 518
 519static irqreturn_t enic_isr_msix_err(int irq, void *data)
 520{
 521	struct enic *enic = data;
 522	unsigned int intr = enic_msix_err_intr(enic);
 523
 524	vnic_intr_return_all_credits(&enic->intr[intr]);
 525
 526	if (enic_log_q_error(enic))
 527		/* schedule recovery from WQ/RQ error */
 528		schedule_work(&enic->reset);
 529
 530	return IRQ_HANDLED;
 531}
 532
 533static irqreturn_t enic_isr_msix_notify(int irq, void *data)
 534{
 535	struct enic *enic = data;
 536	unsigned int intr = enic_msix_notify_intr(enic);
 537
 538	enic_notify_check(enic);
 539	vnic_intr_return_all_credits(&enic->intr[intr]);
 540
 541	return IRQ_HANDLED;
 542}
 543
 544static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
 545				  struct sk_buff *skb, unsigned int len_left,
 546				  int loopback)
 547{
 548	const skb_frag_t *frag;
 549	dma_addr_t dma_addr;
 550
 551	/* Queue additional data fragments */
 552	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 553		len_left -= skb_frag_size(frag);
 554		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
 555					    skb_frag_size(frag),
 556					    DMA_TO_DEVICE);
 557		if (unlikely(enic_dma_map_check(enic, dma_addr)))
 558			return -ENOMEM;
 559		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
 560					(len_left == 0),	/* EOP? */
 561					loopback);
 562	}
 563
 564	return 0;
 565}
 566
 567static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
 568				  struct sk_buff *skb, int vlan_tag_insert,
 569				  unsigned int vlan_tag, int loopback)
 570{
 571	unsigned int head_len = skb_headlen(skb);
 572	unsigned int len_left = skb->len - head_len;
 573	int eop = (len_left == 0);
 574	dma_addr_t dma_addr;
 575	int err = 0;
 576
 577	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
 578				  DMA_TO_DEVICE);
 579	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 580		return -ENOMEM;
 581
 582	/* Queue the main skb fragment. The fragments are no larger
 583	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 584	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 585	 * per fragment is queued.
 586	 */
 587	enic_queue_wq_desc(wq, skb, dma_addr, head_len,	vlan_tag_insert,
 588			   vlan_tag, eop, loopback);
 589
 590	if (!eop)
 591		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 592
 593	return err;
 594}
 595
 596static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
 597				     struct sk_buff *skb, int vlan_tag_insert,
 598				     unsigned int vlan_tag, int loopback)
 599{
 600	unsigned int head_len = skb_headlen(skb);
 601	unsigned int len_left = skb->len - head_len;
 602	unsigned int hdr_len = skb_checksum_start_offset(skb);
 603	unsigned int csum_offset = hdr_len + skb->csum_offset;
 604	int eop = (len_left == 0);
 605	dma_addr_t dma_addr;
 606	int err = 0;
 607
 608	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
 609				  DMA_TO_DEVICE);
 610	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 611		return -ENOMEM;
 612
 613	/* Queue the main skb fragment. The fragments are no larger
 614	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 615	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 616	 * per fragment is queued.
 617	 */
 618	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len,	csum_offset,
 619				   hdr_len, vlan_tag_insert, vlan_tag, eop,
 620				   loopback);
 621
 622	if (!eop)
 623		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 624
 625	return err;
 626}
 627
 628static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
 629{
 630	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
 631
 632	switch (eth->h_proto) {
 633	case ntohs(ETH_P_IP):
 634		inner_ip_hdr(skb)->check = 0;
 635		inner_tcp_hdr(skb)->check =
 636			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
 637					   inner_ip_hdr(skb)->daddr, 0,
 638					   IPPROTO_TCP, 0);
 639		break;
 640	case ntohs(ETH_P_IPV6):
 641		inner_tcp_hdr(skb)->check =
 642			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
 643					 &inner_ipv6_hdr(skb)->daddr, 0,
 644					 IPPROTO_TCP, 0);
 645		break;
 646	default:
 647		WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload");
 648		break;
 649	}
 650}
 651
 652static void enic_preload_tcp_csum(struct sk_buff *skb)
 653{
 654	/* Preload TCP csum field with IP pseudo hdr calculated
 655	 * with IP length set to zero.  HW will later add in length
 656	 * to each TCP segment resulting from the TSO.
 657	 */
 658
 659	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
 660		ip_hdr(skb)->check = 0;
 661		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 662			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 663	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
 664		tcp_v6_gso_csum_prep(skb);
 665	}
 666}
 667
 668static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 669				 struct sk_buff *skb, unsigned int mss,
 670				 int vlan_tag_insert, unsigned int vlan_tag,
 671				 int loopback)
 672{
 673	unsigned int frag_len_left = skb_headlen(skb);
 674	unsigned int len_left = skb->len - frag_len_left;
 675	int eop = (len_left == 0);
 676	unsigned int offset = 0;
 677	unsigned int hdr_len;
 678	dma_addr_t dma_addr;
 679	unsigned int len;
 680	skb_frag_t *frag;
 681
 682	if (skb->encapsulation) {
 683		hdr_len = skb_inner_tcp_all_headers(skb);
 684		enic_preload_tcp_csum_encap(skb);
 685	} else {
 686		hdr_len = skb_tcp_all_headers(skb);
 687		enic_preload_tcp_csum(skb);
 688	}
 689
 690	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 691	 * for the main skb fragment
 692	 */
 693	while (frag_len_left) {
 694		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
 695		dma_addr = dma_map_single(&enic->pdev->dev,
 696					  skb->data + offset, len,
 697					  DMA_TO_DEVICE);
 698		if (unlikely(enic_dma_map_check(enic, dma_addr)))
 699			return -ENOMEM;
 700		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
 701				       vlan_tag_insert, vlan_tag,
 702				       eop && (len == frag_len_left), loopback);
 703		frag_len_left -= len;
 704		offset += len;
 705	}
 706
 707	if (eop)
 708		return 0;
 709
 710	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 711	 * for additional data fragments
 712	 */
 713	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 714		len_left -= skb_frag_size(frag);
 715		frag_len_left = skb_frag_size(frag);
 716		offset = 0;
 717
 718		while (frag_len_left) {
 719			len = min(frag_len_left,
 720				(unsigned int)WQ_ENET_MAX_DESC_LEN);
 721			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 722						    offset, len,
 723						    DMA_TO_DEVICE);
 724			if (unlikely(enic_dma_map_check(enic, dma_addr)))
 725				return -ENOMEM;
 726			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
 727						(len_left == 0) &&
 728						 (len == frag_len_left),/*EOP*/
 729						loopback);
 730			frag_len_left -= len;
 731			offset += len;
 732		}
 733	}
 734
 735	return 0;
 736}
 737
 738static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
 739					  struct sk_buff *skb,
 740					  int vlan_tag_insert,
 741					  unsigned int vlan_tag, int loopback)
 742{
 743	unsigned int head_len = skb_headlen(skb);
 744	unsigned int len_left = skb->len - head_len;
 745	/* Hardware will overwrite the checksum fields, calculating from
 746	 * scratch and ignoring the value placed by software.
 747	 * Offload mode = 00
 748	 * mss[2], mss[1], mss[0] bits are set
 749	 */
 750	unsigned int mss_or_csum = 7;
 751	int eop = (len_left == 0);
 752	dma_addr_t dma_addr;
 753	int err = 0;
 754
 755	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
 756				  DMA_TO_DEVICE);
 757	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 758		return -ENOMEM;
 759
 760	enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
 761			      vlan_tag_insert, vlan_tag,
 762			      WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop,
 763			      loopback);
 764	if (!eop)
 765		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 766
 767	return err;
 768}
 769
 770static inline int enic_queue_wq_skb(struct enic *enic,
 771	struct vnic_wq *wq, struct sk_buff *skb)
 772{
 773	unsigned int mss = skb_shinfo(skb)->gso_size;
 774	unsigned int vlan_tag = 0;
 775	int vlan_tag_insert = 0;
 776	int loopback = 0;
 777	int err;
 778
 779	if (skb_vlan_tag_present(skb)) {
 780		/* VLAN tag from trunking driver */
 781		vlan_tag_insert = 1;
 782		vlan_tag = skb_vlan_tag_get(skb);
 783	} else if (enic->loop_enable) {
 784		vlan_tag = enic->loop_tag;
 785		loopback = 1;
 786	}
 787
 788	if (mss)
 789		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
 790					    vlan_tag_insert, vlan_tag,
 791					    loopback);
 792	else if (skb->encapsulation)
 793		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
 794					      vlan_tag, loopback);
 795	else if	(skb->ip_summed == CHECKSUM_PARTIAL)
 796		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
 797						vlan_tag, loopback);
 798	else
 799		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
 800					     vlan_tag, loopback);
 801	if (unlikely(err)) {
 802		struct vnic_wq_buf *buf;
 803
 804		buf = wq->to_use->prev;
 805		/* Unwind the partial packet: while not EOP of previous pkt
 806		 * && queue not empty.  For all non-EOP bufs, os_buf is NULL.
 807		 */
 808		while (!buf->os_buf && (buf->next != wq->to_clean)) {
 809			enic_free_wq_buf(wq, buf);
 810			wq->ring.desc_avail++;
 811			buf = buf->prev;
 812		}
 813		wq->to_use = buf->next;
 814		dev_kfree_skb(skb);
 815	}
 816	return err;
 817}
 818
 819/* netif_tx_lock held, process context with BHs disabled, or BH */
 820static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 821	struct net_device *netdev)
 822{
 823	struct enic *enic = netdev_priv(netdev);
 824	struct vnic_wq *wq;
 825	unsigned int txq_map;
 826	struct netdev_queue *txq;
 827
 828	if (skb->len <= 0) {
 829		dev_kfree_skb_any(skb);
 830		return NETDEV_TX_OK;
 831	}
 832
 833	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
 834	wq = &enic->wq[txq_map];
 835	txq = netdev_get_tx_queue(netdev, txq_map);
 836
 837	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 838	 * which is very likely.  In the off chance it's going to take
 839	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
 840	 */
 841
 842	if (skb_shinfo(skb)->gso_size == 0 &&
 843	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
 844	    skb_linearize(skb)) {
 845		dev_kfree_skb_any(skb);
 846		return NETDEV_TX_OK;
 847	}
 848
 849	spin_lock(&enic->wq_lock[txq_map]);
 850
 851	if (vnic_wq_desc_avail(wq) <
 852	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
 853		netif_tx_stop_queue(txq);
 854		/* This is a hard error, log it */
 855		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
 856		spin_unlock(&enic->wq_lock[txq_map]);
 857		return NETDEV_TX_BUSY;
 858	}
 859
 860	if (enic_queue_wq_skb(enic, wq, skb))
 861		goto error;
 862
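	/* Leave headroom for a worst-case packet (MAX_SKB_FRAGS
	 * fragments plus ENIC_DESC_MAX_SPLITS TSO splits): stop the
	 * queue now and let tx completions wake it when descriptors
	 * free up.
	 */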
 863	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 864		netif_tx_stop_queue(txq);
 865	skb_tx_timestamp(skb);
 866	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 867		vnic_wq_doorbell(wq);
 868
 869error:
 870	spin_unlock(&enic->wq_lock[txq_map]);
 871
 872	return NETDEV_TX_OK;
 873}
 874
 875/* rcu_read_lock potentially held, nominally process context */
 876static void enic_get_stats(struct net_device *netdev,
 877			   struct rtnl_link_stats64 *net_stats)
 878{
 879	struct enic *enic = netdev_priv(netdev);
 880	struct vnic_stats *stats;
 881	int err;
 882
 883	err = enic_dev_stats_dump(enic, &stats);
 884	/* Return early only when dma_alloc_coherent fails in
 885	 * vnic_dev_stats_dump.  For other failures, like a devcmd
 886	 * failure, we return the previously recorded stats.
 887	 */
 888	if (err == -ENOMEM)
 889		return;
 890
 891	net_stats->tx_packets = stats->tx.tx_frames_ok;
 892	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
 893	net_stats->tx_errors = stats->tx.tx_errors;
 894	net_stats->tx_dropped = stats->tx.tx_drops;
 895
 896	net_stats->rx_packets = stats->rx.rx_frames_ok;
 897	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
 898	net_stats->rx_errors = stats->rx.rx_errors;
 899	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
 900	net_stats->rx_over_errors = enic->rq_truncated_pkts;
 901	net_stats->rx_crc_errors = enic->rq_bad_fcs;
 902	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
 903}
 904
 905static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
 906{
 907	struct enic *enic = netdev_priv(netdev);
 908
 909	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
 910		unsigned int mc_count = netdev_mc_count(netdev);
 911
 912		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
 913			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
 914
 915		return -ENOSPC;
 916	}
 917
 918	enic_dev_add_addr(enic, mc_addr);
 919	enic->mc_count++;
 920
 921	return 0;
 922}
 923
 924static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
 925{
 926	struct enic *enic = netdev_priv(netdev);
 927
 928	enic_dev_del_addr(enic, mc_addr);
 929	enic->mc_count--;
 930
 931	return 0;
 932}
 933
 934static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
 935{
 936	struct enic *enic = netdev_priv(netdev);
 937
 938	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
 939		unsigned int uc_count = netdev_uc_count(netdev);
 940
 941		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
 942			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);
 943
 944		return -ENOSPC;
 945	}
 946
 947	enic_dev_add_addr(enic, uc_addr);
 948	enic->uc_count++;
 949
 950	return 0;
 951}
 952
 953static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
 954{
 955	struct enic *enic = netdev_priv(netdev);
 956
 957	enic_dev_del_addr(enic, uc_addr);
 958	enic->uc_count--;
 959
 960	return 0;
 961}
 962
 963void enic_reset_addr_lists(struct enic *enic)
 964{
 965	struct net_device *netdev = enic->netdev;
 966
 967	__dev_uc_unsync(netdev, NULL);
 968	__dev_mc_unsync(netdev, NULL);
 969
 970	enic->mc_count = 0;
 971	enic->uc_count = 0;
 972	enic->flags = 0;
 973}
 974
 975static int enic_set_mac_addr(struct net_device *netdev, char *addr)
 976{
 977	struct enic *enic = netdev_priv(netdev);
 978
 979	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
 980		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
 981			return -EADDRNOTAVAIL;
 982	} else {
 983		if (!is_valid_ether_addr(addr))
 984			return -EADDRNOTAVAIL;
 985	}
 986
 987	eth_hw_addr_set(netdev, addr);
 988
 989	return 0;
 990}
 991
 992static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
 993{
 994	struct enic *enic = netdev_priv(netdev);
 995	struct sockaddr *saddr = p;
 996	char *addr = saddr->sa_data;
 997	int err;
 998
 999	if (netif_running(enic->netdev)) {
1000		err = enic_dev_del_station_addr(enic);
1001		if (err)
1002			return err;
1003	}
1004
1005	err = enic_set_mac_addr(netdev, addr);
1006	if (err)
1007		return err;
1008
1009	if (netif_running(enic->netdev)) {
1010		err = enic_dev_add_station_addr(enic);
1011		if (err)
1012			return err;
1013	}
1014
1015	return err;
1016}
1017
1018static int enic_set_mac_address(struct net_device *netdev, void *p)
1019{
1020	struct sockaddr *saddr = p;
1021	char *addr = saddr->sa_data;
1022	struct enic *enic = netdev_priv(netdev);
1023	int err;
1024
1025	err = enic_dev_del_station_addr(enic);
1026	if (err)
1027		return err;
1028
1029	err = enic_set_mac_addr(netdev, addr);
1030	if (err)
1031		return err;
1032
1033	return enic_dev_add_station_addr(enic);
1034}
1035
1036/* netif_tx_lock held, BHs disabled */
1037static void enic_set_rx_mode(struct net_device *netdev)
1038{
1039	struct enic *enic = netdev_priv(netdev);
1040	int directed = 1;
1041	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
1042	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
1043	int promisc = (netdev->flags & IFF_PROMISC) ||
1044		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
1045	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
1046		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
1047	unsigned int flags = netdev->flags |
1048		(allmulti ? IFF_ALLMULTI : 0) |
1049		(promisc ? IFF_PROMISC : 0);
1050
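	/* Push the packet filter to hw only when the effective flags
	 * change, then sync the perfect filters.  uc/mc sync is skipped
	 * once we have fallen back to promisc/allmulti above, since the
	 * hw perfect-filter tables would overflow anyway.
	 */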
1051	if (enic->flags != flags) {
1052		enic->flags = flags;
1053		enic_dev_packet_filter(enic, directed,
1054			multicast, broadcast, promisc, allmulti);
1055	}
1056
1057	if (!promisc) {
1058		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
1059		if (!allmulti)
1060			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
1061	}
1062}
1063
1064/* netif_tx_lock held, BHs disabled */
1065static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1066{
1067	struct enic *enic = netdev_priv(netdev);
1068	schedule_work(&enic->tx_hang_reset);
1069}
1070
1071static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1072{
1073	struct enic *enic = netdev_priv(netdev);
1074	struct enic_port_profile *pp;
1075	int err;
1076
1077	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
1078	if (err)
1079		return err;
1080
1081	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
1082		if (vf == PORT_SELF_VF) {
1083			memcpy(pp->vf_mac, mac, ETH_ALEN);
1084			return 0;
1085		} else {
1086			/*
1087			 * For SR-IOV VFs, set the mac in hw
1088			 */
1089			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
1090				vnic_dev_set_mac_addr, mac);
1091			return enic_dev_status_to_errno(err);
1092		}
1093	} else
1094		return -EINVAL;
1095}
1096
1097static int enic_set_vf_port(struct net_device *netdev, int vf,
1098	struct nlattr *port[])
1099{
1100	static const u8 zero_addr[ETH_ALEN] = {};
1101	struct enic *enic = netdev_priv(netdev);
1102	struct enic_port_profile prev_pp;
1103	struct enic_port_profile *pp;
1104	int err = 0, restore_pp = 1;
1105
1106	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
1107	if (err)
1108		return err;
1109
1110	if (!port[IFLA_PORT_REQUEST])
1111		return -EOPNOTSUPP;
1112
1113	memcpy(&prev_pp, pp, sizeof(*enic->pp));
1114	memset(pp, 0, sizeof(*enic->pp));
1115
1116	pp->set |= ENIC_SET_REQUEST;
1117	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1118
1119	if (port[IFLA_PORT_PROFILE]) {
1120		if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
1121			memcpy(pp, &prev_pp, sizeof(*pp));
1122			return -EINVAL;
1123		}
1124		pp->set |= ENIC_SET_NAME;
1125		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
1126			PORT_PROFILE_MAX);
1127	}
1128
1129	if (port[IFLA_PORT_INSTANCE_UUID]) {
1130		if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
1131			memcpy(pp, &prev_pp, sizeof(*pp));
1132			return -EINVAL;
1133		}
1134		pp->set |= ENIC_SET_INSTANCE;
1135		memcpy(pp->instance_uuid,
1136			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
1137	}
1138
1139	if (port[IFLA_PORT_HOST_UUID]) {
1140		if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
1141			memcpy(pp, &prev_pp, sizeof(*pp));
1142			return -EINVAL;
1143		}
1144		pp->set |= ENIC_SET_HOST;
1145		memcpy(pp->host_uuid,
1146			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
1147	}
1148
1149	if (vf == PORT_SELF_VF) {
1150		/* Special case handling: mac came from IFLA_VF_MAC */
1151		if (!is_zero_ether_addr(prev_pp.vf_mac))
1152			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);
1153
1154		if (is_zero_ether_addr(netdev->dev_addr))
1155			eth_hw_addr_random(netdev);
1156	} else {
1157		/* SR-IOV VF: get mac from adapter */
1158		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
1159			vnic_dev_get_mac_addr, pp->mac_addr);
1160		if (err) {
1161			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
1162			memcpy(pp, &prev_pp, sizeof(*pp));
1163			return enic_dev_status_to_errno(err);
1164		}
1165	}
1166
1167	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
1168	if (err) {
1169		if (restore_pp) {
1170			/* Things are still the way they were: Implicit
1171			 * DISASSOCIATE failed
1172			 */
1173			memcpy(pp, &prev_pp, sizeof(*pp));
1174		} else {
1175			memset(pp, 0, sizeof(*pp));
1176			if (vf == PORT_SELF_VF)
1177				eth_hw_addr_set(netdev, zero_addr);
1178		}
1179	} else {
1180		/* Set flag to indicate that the port assoc/disassoc
1181		 * request has been sent out to fw
1182		 */
1183		pp->set |= ENIC_PORT_REQUEST_APPLIED;
1184
1185		/* If DISASSOCIATE, clean up all assigned/saved mac addresses */
1186		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
1187			eth_zero_addr(pp->mac_addr);
1188			if (vf == PORT_SELF_VF)
1189				eth_hw_addr_set(netdev, zero_addr);
1190		}
1191	}
1192
1193	if (vf == PORT_SELF_VF)
1194		eth_zero_addr(pp->vf_mac);
1195
1196	return err;
1197}
1198
1199static int enic_get_vf_port(struct net_device *netdev, int vf,
1200	struct sk_buff *skb)
1201{
1202	struct enic *enic = netdev_priv(netdev);
1203	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1204	struct enic_port_profile *pp;
1205	int err;
1206
1207	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
1208	if (err)
1209		return err;
1210
1211	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
1212		return -ENODATA;
1213
1214	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
1215	if (err)
1216		return err;
1217
1218	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
1219	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
1220	    ((pp->set & ENIC_SET_NAME) &&
1221	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
1222	    ((pp->set & ENIC_SET_INSTANCE) &&
1223	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
1224		     pp->instance_uuid)) ||
1225	    ((pp->set & ENIC_SET_HOST) &&
1226	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
1227		goto nla_put_failure;
1228	return 0;
1229
1230nla_put_failure:
1231	return -EMSGSIZE;
1232}
1233
1234static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
1235{
1236	struct enic *enic = vnic_dev_priv(rq->vdev);
1237
1238	if (!buf->os_buf)
1239		return;
1240
1241	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
1242			 DMA_FROM_DEVICE);
1243	dev_kfree_skb_any(buf->os_buf);
1244	buf->os_buf = NULL;
1245}
1246
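/* Post one receive buffer to the RQ.  If the slot still holds a
 * mapped skb (e.g. rx_copybreak recycled it), just re-post it;
 * otherwise allocate and DMA-map a fresh skb sized for the MTU.
 */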
1247static int enic_rq_alloc_buf(struct vnic_rq *rq)
1248{
1249	struct enic *enic = vnic_dev_priv(rq->vdev);
1250	struct net_device *netdev = enic->netdev;
1251	struct sk_buff *skb;
1252	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
1253	unsigned int os_buf_index = 0;
1254	dma_addr_t dma_addr;
1255	struct vnic_rq_buf *buf = rq->to_use;
1256
1257	if (buf->os_buf) {
1258		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
1259				   buf->len);
1260
1261		return 0;
1262	}
1263	skb = netdev_alloc_skb_ip_align(netdev, len);
1264	if (!skb)
1265		return -ENOMEM;
1266
1267	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
1268				  DMA_FROM_DEVICE);
1269	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
1270		dev_kfree_skb(skb);
1271		return -ENOMEM;
1272	}
1273
1274	enic_queue_rq_desc(rq, skb, os_buf_index,
1275		dma_addr, len);
1276
1277	return 0;
1278}
1279
1280static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
1281				      u32 pkt_len)
1282{
1283	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
1284		pkt_size->large_pkt_bytes_cnt += pkt_len;
1285	else
1286		pkt_size->small_pkt_bytes_cnt += pkt_len;
1287}
1288
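/* Copybreak: for frames no larger than rx_copybreak, copy the data
 * into a small freshly-allocated skb so the original DMA buffer can
 * stay mapped and be recycled by enic_rq_alloc_buf().
 */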
1289static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
1290			     struct vnic_rq_buf *buf, u16 len)
1291{
1292	struct enic *enic = netdev_priv(netdev);
1293	struct sk_buff *new_skb;
1294
1295	if (len > enic->rx_copybreak)
1296		return false;
1297	new_skb = netdev_alloc_skb_ip_align(netdev, len);
1298	if (!new_skb)
1299		return false;
1300	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
1301				DMA_FROM_DEVICE);
1302	memcpy(new_skb->data, (*skb)->data, len);
1303	*skb = new_skb;
1304
1305	return true;
1306}
1307
1308static void enic_rq_indicate_buf(struct vnic_rq *rq,
1309	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1310	int skipped, void *opaque)
1311{
1312	struct enic *enic = vnic_dev_priv(rq->vdev);
1313	struct net_device *netdev = enic->netdev;
1314	struct sk_buff *skb;
1315	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1316
1317	u8 type, color, eop, sop, ingress_port, vlan_stripped;
1318	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
1319	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
1320	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
1321	u8 packet_error;
1322	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
1323	u32 rss_hash;
1324	bool outer_csum_ok = true, encap = false;
1325
1326	if (skipped)
1327		return;
1328
1329	skb = buf->os_buf;
1330
1331	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
1332		&type, &color, &q_number, &completed_index,
1333		&ingress_port, &fcoe, &eop, &sop, &rss_type,
1334		&csum_not_calc, &rss_hash, &bytes_written,
1335		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
1336		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
1337		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
1338		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
1339		&fcs_ok);
1340
1341	if (packet_error) {
1342
1343		if (!fcs_ok) {
1344			if (bytes_written > 0)
1345				enic->rq_bad_fcs++;
1346			else if (bytes_written == 0)
1347				enic->rq_truncated_pkts++;
1348		}
1349
1350		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
1351				 DMA_FROM_DEVICE);
1352		dev_kfree_skb_any(skb);
1353		buf->os_buf = NULL;
1354
1355		return;
1356	}
1357
1358	if (eop && bytes_written > 0) {
1359
1360		/* Good receive
1361		 */
1362
1363		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
1364			buf->os_buf = NULL;
1365			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
1366					 buf->len, DMA_FROM_DEVICE);
1367		}
1368		prefetch(skb->data - NET_IP_ALIGN);
1369
1370		skb_put(skb, bytes_written);
1371		skb->protocol = eth_type_trans(skb, netdev);
1372		skb_record_rx_queue(skb, q_number);
1373		if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
1374		    (type == 3)) {
1375			switch (rss_type) {
1376			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
1377			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
1378			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
1379				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
1380				break;
1381			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
1382			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
1383			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
1384				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
1385				break;
1386			}
1387		}
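		/* Detect VXLAN-encapsulated frames.  How hw flags them
		 * depends on the firmware patch level: level 0 reuses the
		 * fcoe bit as the encap indicator, while level 2 encodes
		 * encap and outer-checksum status in rss_hash bits 0-2.
		 */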
1388		if (enic->vxlan.vxlan_udp_port_number) {
1389			switch (enic->vxlan.patch_level) {
1390			case 0:
1391				if (fcoe) {
1392					encap = true;
1393					outer_csum_ok = fcoe_fc_crc_ok;
1394				}
1395				break;
1396			case 2:
1397				if ((type == 7) &&
1398				    (rss_hash & BIT(0))) {
1399					encap = true;
1400					outer_csum_ok = (rss_hash & BIT(1)) &&
1401							(rss_hash & BIT(2));
1402				}
1403				break;
1404			}
1405		}
1406
1407		/* Hardware does not provide the whole-packet checksum; it
1408		 * only provides a pseudo checksum. Since hw validates the
1409		 * packet checksum but does not give us the checksum value,
1410		 * use CHECKSUM_UNNECESSARY.
1411		 *
1412		 * In case of an encap pkt, tcp_udp_csum_ok is the inner
1413		 * csum_ok. outer_csum_ok is set by hw when the outer udp
1414		 * csum is correct or is zero.
1415		 */
1416		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
1417		    tcp_udp_csum_ok && outer_csum_ok &&
1418		    (ipv4_csum_ok || ipv6)) {
1419			skb->ip_summed = CHECKSUM_UNNECESSARY;
1420			skb->csum_level = encap;
1421		}
1422
1423		if (vlan_stripped)
1424			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1425
1426		skb_mark_napi_id(skb, &enic->napi[rq->index]);
1427		if (!(netdev->features & NETIF_F_GRO))
1428			netif_receive_skb(skb);
1429		else
1430			napi_gro_receive(&enic->napi[q_number], skb);
1431		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1432			enic_intr_update_pkt_size(&cq->pkt_size_counter,
1433						  bytes_written);
1434	} else {
1435
1436		/* Buffer overflow
1437		 */
1438
1439		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
1440				 DMA_FROM_DEVICE);
1441		dev_kfree_skb_any(skb);
1442		buf->os_buf = NULL;
1443	}
1444}
1445
1446static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1447	u8 type, u16 q_number, u16 completed_index, void *opaque)
1448{
1449	struct enic *enic = vnic_dev_priv(vdev);
1450
1451	vnic_rq_service(&enic->rq[q_number], cq_desc,
1452		completed_index, VNIC_RQ_RETURN_DESC,
1453		enic_rq_indicate_buf, opaque);
1454
1455	return 0;
1456}
1457
1458static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
1459{
1460	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
1461	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1462	u32 timer = cq->tobe_rx_coal_timeval;
1463
1464	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
1465		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
1466		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
1467	}
1468}
1469
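/* Adaptive interrupt moderation: at most once every ENIC_AIC_TS_BREAK
 * usecs, estimate the rx rate in Mbps, map it through mod_table to a
 * coalescing timer within the range picked for small vs large packets,
 * and damp the result against the previous value.
 */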
1470static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
1471{
1472	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1473	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
1474	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
1475	int index;
1476	u32 timer;
1477	u32 range_start;
1478	u32 traffic;
1479	u64 delta;
1480	ktime_t now = ktime_get();
1481
1482	delta = ktime_us_delta(now, cq->prev_ts);
1483	if (delta < ENIC_AIC_TS_BREAK)
1484		return;
1485	cq->prev_ts = now;
1486
1487	traffic = pkt_size_counter->large_pkt_bytes_cnt +
1488		  pkt_size_counter->small_pkt_bytes_cnt;
1489	/* The table takes Mbps
1490	 * traffic *= 8    => bits
1491	 * traffic *= (10^6 / delta)    => bps
1492	 * traffic /= 10^6     => Mbps
1493	 *
1494	 * Combining, traffic *= (8 / delta)
1495	 */
1496
1497	traffic <<= 3;
1498	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
1499
1500	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
1501		if (traffic < mod_table[index].rx_rate)
1502			break;
1503	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
1504		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
1505		      rx_coal->small_pkt_range_start :
1506		      rx_coal->large_pkt_range_start;
1507	timer = range_start + ((rx_coal->range_end - range_start) *
1508			       mod_table[index].range_percent / 100);
1509	/* Damping */
1510	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
1511
1512	pkt_size_counter->large_pkt_bytes_cnt = 0;
1513	pkt_size_counter->small_pkt_bytes_cnt = 0;
1514}
1515
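/* NAPI poll for legacy INTx/MSI: a single handler services both the
 * WQ and RQ completion queues and refills the receive ring.
 */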
1516static int enic_poll(struct napi_struct *napi, int budget)
1517{
1518	struct net_device *netdev = napi->dev;
1519	struct enic *enic = netdev_priv(netdev);
1520	unsigned int cq_rq = enic_cq_rq(enic, 0);
1521	unsigned int cq_wq = enic_cq_wq(enic, 0);
1522	unsigned int intr = ENIC_LEGACY_IO_INTR;
1523	unsigned int rq_work_to_do = budget;
1524	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
1525	unsigned int  work_done, rq_work_done = 0, wq_work_done;
1526	int err;
1527
1528	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
1529				       enic_wq_service, NULL);
1530
1531	if (budget > 0)
1532		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
1533			rq_work_to_do, enic_rq_service, NULL);
1534
1535	/* Accumulate intr event credits for this polling
1536	 * cycle.  An intr event is the completion of
1537	 * a WQ or RQ packet.
1538	 */
1539
1540	work_done = rq_work_done + wq_work_done;
1541
1542	if (work_done > 0)
1543		vnic_intr_return_credits(&enic->intr[intr],
1544			work_done,
1545			0 /* don't unmask intr */,
1546			0 /* don't reset intr timer */);
1547
1548	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1549
1550	/* Buffer allocation failed. Stay in polling
1551	 * mode so we can try to fill the ring again.
1552	 */
1553
1554	if (err)
1555		rq_work_done = rq_work_to_do;
1556	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1557		/* Refresh the intr coalescing timer value based on the
1558		 * observed traffic.
1559		 */
1560		enic_calc_int_moderation(enic, &enic->rq[0]);
1561
1562	if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
1563
1564		/* Some work done, but not enough to stay in polling,
1565		 * exit polling
1566		 */
1567
1568		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1569			enic_set_int_moderation(enic, &enic->rq[0]);
1570		vnic_intr_unmask(&enic->intr[intr]);
1571	}
1572
1573	return rq_work_done;
1574}
1575
1576#ifdef CONFIG_RFS_ACCEL
1577static void enic_free_rx_cpu_rmap(struct enic *enic)
1578{
1579	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
1580	enic->netdev->rx_cpu_rmap = NULL;
1581}
1582
1583static void enic_set_rx_cpu_rmap(struct enic *enic)
1584{
1585	int i, res;
1586
1587	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
1588		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
1589		if (unlikely(!enic->netdev->rx_cpu_rmap))
1590			return;
1591		for (i = 0; i < enic->rq_count; i++) {
1592			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
1593					       enic->msix_entry[i].vector);
1594			if (unlikely(res)) {
1595				enic_free_rx_cpu_rmap(enic);
1596				return;
1597			}
1598		}
1599	}
1600}
1601
1602#else
1603
1604static void enic_free_rx_cpu_rmap(struct enic *enic)
1605{
1606}
1607
1608static void enic_set_rx_cpu_rmap(struct enic *enic)
1609{
1610}
1611
1612#endif /* CONFIG_RFS_ACCEL */
1613
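/* Per-WQ NAPI poll for MSI-X: service tx completions and re-arm the
 * interrupt once no work remains.
 */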
1614static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
1615{
1616	struct net_device *netdev = napi->dev;
1617	struct enic *enic = netdev_priv(netdev);
1618	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
1619	struct vnic_wq *wq = &enic->wq[wq_index];
1620	unsigned int cq;
1621	unsigned int intr;
1622	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
1623	unsigned int wq_work_done;
1624	unsigned int wq_irq;
1625
1626	wq_irq = wq->index;
1627	cq = enic_cq_wq(enic, wq_irq);
1628	intr = enic_msix_wq_intr(enic, wq_irq);
1629	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
1630				       enic_wq_service, NULL);
1631
1632	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
1633				 0 /* don't unmask intr */,
1634				 1 /* reset intr timer */);
1635	if (!wq_work_done) {
1636		napi_complete(napi);
1637		vnic_intr_unmask(&enic->intr[intr]);
1638		return 0;
1639	}
1640
1641	return budget;
1642}
1643
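/* Per-RQ NAPI poll for MSI-X: service rx completions, refill the
 * ring, update adaptive coalescing, and re-arm the interrupt when
 * done.
 */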
1644static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
1645{
1646	struct net_device *netdev = napi->dev;
1647	struct enic *enic = netdev_priv(netdev);
1648	unsigned int rq = (napi - &enic->napi[0]);
1649	unsigned int cq = enic_cq_rq(enic, rq);
1650	unsigned int intr = enic_msix_rq_intr(enic, rq);
1651	unsigned int work_to_do = budget;
1652	unsigned int work_done = 0;
1653	int err;
1654
1655	/* Service RQ
1656	 */
1657
1658	if (budget > 0)
1659		work_done = vnic_cq_service(&enic->cq[cq],
1660			work_to_do, enic_rq_service, NULL);
1661
1662	/* Return intr event credits for this polling
1663	 * cycle.  An intr event is the completion of
1664	 * an RQ packet.
1665	 */
1666
1667	if (work_done > 0)
1668		vnic_intr_return_credits(&enic->intr[intr],
1669			work_done,
1670			0 /* don't unmask intr */,
1671			0 /* don't reset intr timer */);
1672
1673	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1674
1675	/* Buffer allocation failed. Stay in polling mode
1676	 * so we can try to fill the ring again.
1677	 */
1678
1679	if (err)
1680		work_done = work_to_do;
1681	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1682		/* Refresh the intr coalescing timer value based on the
1683		 * observed traffic.
1684		 */
1685		enic_calc_int_moderation(enic, &enic->rq[rq]);
1686
1687	if ((work_done < budget) && napi_complete_done(napi, work_done)) {
1688
1689		/* Some work done, but not enough to stay in polling,
1690		 * exit polling
1691		 */
1692
1693		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
1694			enic_set_int_moderation(enic, &enic->rq[rq]);
1695		vnic_intr_unmask(&enic->intr[intr]);
1696	}
1697
1698	return work_done;
1699}
1700
1701static void enic_notify_timer(struct timer_list *t)
1702{
1703	struct enic *enic = from_timer(enic, t, notify_timer);
1704
1705	enic_notify_check(enic);
1706
1707	mod_timer(&enic->notify_timer,
1708		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
1709}
1710
1711static void enic_free_intr(struct enic *enic)
1712{
1713	struct net_device *netdev = enic->netdev;
1714	unsigned int i;
1715
1716	enic_free_rx_cpu_rmap(enic);
1717	switch (vnic_dev_get_intr_mode(enic->vdev)) {
1718	case VNIC_DEV_INTR_MODE_INTX:
1719		free_irq(enic->pdev->irq, netdev);
1720		break;
1721	case VNIC_DEV_INTR_MODE_MSI:
1722		free_irq(enic->pdev->irq, enic);
1723		break;
1724	case VNIC_DEV_INTR_MODE_MSIX:
1725		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1726			if (enic->msix[i].requested)
1727				free_irq(enic->msix_entry[i].vector,
1728					enic->msix[i].devid);
1729		break;
1730	default:
1731		break;
1732	}
1733}
1734
1735static int enic_request_intr(struct enic *enic)
1736{
1737	struct net_device *netdev = enic->netdev;
1738	unsigned int i, intr;
1739	int err = 0;
1740
1741	enic_set_rx_cpu_rmap(enic);
1742	switch (vnic_dev_get_intr_mode(enic->vdev)) {
1743
1744	case VNIC_DEV_INTR_MODE_INTX:
1745
1746		err = request_irq(enic->pdev->irq, enic_isr_legacy,
1747			IRQF_SHARED, netdev->name, netdev);
1748		break;
1749
1750	case VNIC_DEV_INTR_MODE_MSI:
1751
1752		err = request_irq(enic->pdev->irq, enic_isr_msi,
1753			0, netdev->name, enic);
1754		break;
1755
1756	case VNIC_DEV_INTR_MODE_MSIX:
1757
1758		for (i = 0; i < enic->rq_count; i++) {
1759			intr = enic_msix_rq_intr(enic, i);
1760			snprintf(enic->msix[intr].devname,
1761				sizeof(enic->msix[intr].devname),
1762				"%s-rx-%u", netdev->name, i);
1763			enic->msix[intr].isr = enic_isr_msix;
1764			enic->msix[intr].devid = &enic->napi[i];
1765		}
1766
1767		for (i = 0; i < enic->wq_count; i++) {
1768			int wq = enic_cq_wq(enic, i);
1769
1770			intr = enic_msix_wq_intr(enic, i);
1771			snprintf(enic->msix[intr].devname,
1772				sizeof(enic->msix[intr].devname),
1773				"%s-tx-%u", netdev->name, i);
1774			enic->msix[intr].isr = enic_isr_msix;
1775			enic->msix[intr].devid = &enic->napi[wq];
1776		}
1777
1778		intr = enic_msix_err_intr(enic);
1779		snprintf(enic->msix[intr].devname,
1780			sizeof(enic->msix[intr].devname),
1781			"%s-err", netdev->name);
1782		enic->msix[intr].isr = enic_isr_msix_err;
1783		enic->msix[intr].devid = enic;
1784
1785		intr = enic_msix_notify_intr(enic);
1786		snprintf(enic->msix[intr].devname,
1787			sizeof(enic->msix[intr].devname),
1788			"%s-notify", netdev->name);
1789		enic->msix[intr].isr = enic_isr_msix_notify;
1790		enic->msix[intr].devid = enic;
1791
1792		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1793			enic->msix[i].requested = 0;
1794
1795		for (i = 0; i < enic->intr_count; i++) {
1796			err = request_irq(enic->msix_entry[i].vector,
1797				enic->msix[i].isr, 0,
1798				enic->msix[i].devname,
1799				enic->msix[i].devid);
1800			if (err) {
1801				enic_free_intr(enic);
1802				break;
1803			}
1804			enic->msix[i].requested = 1;
1805		}
1806
1807		break;
1808
1809	default:
1810		break;
1811	}
1812
1813	return err;
1814}
1815
1816static void enic_synchronize_irqs(struct enic *enic)
1817{
1818	unsigned int i;
1819
1820	switch (vnic_dev_get_intr_mode(enic->vdev)) {
1821	case VNIC_DEV_INTR_MODE_INTX:
1822	case VNIC_DEV_INTR_MODE_MSI:
1823		synchronize_irq(enic->pdev->irq);
1824		break;
1825	case VNIC_DEV_INTR_MODE_MSIX:
1826		for (i = 0; i < enic->intr_count; i++)
1827			synchronize_irq(enic->msix_entry[i].vector);
1828		break;
1829	default:
1830		break;
1831	}
1832}
1833
1834static void enic_set_rx_coal_setting(struct enic *enic)
1835{
1836	unsigned int speed;
1837	int index = -1;
1838	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
1839
1840	/* 1. Read the link speed from fw
1841	 * 2. Pick the default range for the speed
1842	 * 3. Update it in enic->rx_coalesce_setting
1843	 */
1844	speed = vnic_dev_port_speed(enic->vdev);
1845	if (ENIC_LINK_SPEED_10G < speed)
1846		index = ENIC_LINK_40G_INDEX;
1847	else if (ENIC_LINK_SPEED_4G < speed)
1848		index = ENIC_LINK_10G_INDEX;
1849	else
1850		index = ENIC_LINK_4G_INDEX;
1851
1852	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
1853	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
1854	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
1855
1856	/* Start with the value provided by UCSM */
1857	for (index = 0; index < enic->rq_count; index++)
1858		enic->cq[index].cur_rx_coal_timeval =
1859				enic->config.intr_timer_usec;
1860
1861	rx_coal->use_adaptive_rx_coalesce = 1;
1862}
1863
1864static int enic_dev_notify_set(struct enic *enic)
1865{
1866	int err;
1867
1868	spin_lock_bh(&enic->devcmd_lock);
1869	switch (vnic_dev_get_intr_mode(enic->vdev)) {
1870	case VNIC_DEV_INTR_MODE_INTX:
1871		err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
1872		break;
1873	case VNIC_DEV_INTR_MODE_MSIX:
1874		err = vnic_dev_notify_set(enic->vdev,
1875			enic_msix_notify_intr(enic));
1876		break;
1877	default:
1878		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1879		break;
1880	}
1881	spin_unlock_bh(&enic->devcmd_lock);
1882
1883	return err;
1884}
1885
1886static void enic_notify_timer_start(struct enic *enic)
1887{
1888	switch (vnic_dev_get_intr_mode(enic->vdev)) {
1889	case VNIC_DEV_INTR_MODE_MSI:
1890		mod_timer(&enic->notify_timer, jiffies);
1891		break;
1892	default:
1893		/* Using intr for notification for INTx/MSI-X */
1894		break;
1895	}
1896}
1897
1898/* rtnl lock is held, process context */
1899static int enic_open(struct net_device *netdev)
1900{
1901	struct enic *enic = netdev_priv(netdev);
1902	unsigned int i;
1903	int err, ret;
1904
1905	err = enic_request_intr(enic);
1906	if (err) {
1907		netdev_err(netdev, "Unable to request irq.\n");
1908		return err;
1909	}
1910	enic_init_affinity_hint(enic);
1911	enic_set_affinity_hint(enic);
1912
1913	err = enic_dev_notify_set(enic);
1914	if (err) {
1915		netdev_err(netdev,
1916			"Failed to alloc notify buffer, aborting.\n");
1917		goto err_out_free_intr;
1918	}
1919
1920	for (i = 0; i < enic->rq_count; i++) {
1921		/* enable rq before updating rq desc */
1922		vnic_rq_enable(&enic->rq[i]);
1923		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1924		/* Need at least one buffer on ring to get going */
1925		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1926			netdev_err(netdev, "Unable to alloc receive buffers\n");
1927			err = -ENOMEM;
1928			goto err_out_free_rq;
1929		}
1930	}
1931
1932	for (i = 0; i < enic->wq_count; i++)
1933		vnic_wq_enable(&enic->wq[i]);
1934
1935	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
1936		enic_dev_add_station_addr(enic);
1937
1938	enic_set_rx_mode(netdev);
1939
1940	netif_tx_wake_all_queues(netdev);
1941
1942	for (i = 0; i < enic->rq_count; i++)
1943		napi_enable(&enic->napi[i]);
1944
1945	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
1946		for (i = 0; i < enic->wq_count; i++)
1947			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
1948	enic_dev_enable(enic);
1949
1950	for (i = 0; i < enic->intr_count; i++)
1951		vnic_intr_unmask(&enic->intr[i]);
1952
1953	enic_notify_timer_start(enic);
1954	enic_rfs_timer_start(enic);
1955
1956	return 0;
1957
1958err_out_free_rq:
1959	for (i = 0; i < enic->rq_count; i++) {
1960		ret = vnic_rq_disable(&enic->rq[i]);
1961		if (!ret)
1962			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1963	}
1964	enic_dev_notify_unset(enic);
1965err_out_free_intr:
1966	enic_unset_affinity_hint(enic);
1967	enic_free_intr(enic);
1968
1969	return err;
1970}
1971
1972/* rtnl lock is held, process context */
1973static int enic_stop(struct net_device *netdev)
1974{
1975	struct enic *enic = netdev_priv(netdev);
1976	unsigned int i;
1977	int err;
1978
1979	for (i = 0; i < enic->intr_count; i++) {
1980		vnic_intr_mask(&enic->intr[i]);
1981		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
1982	}
1983
1984	enic_synchronize_irqs(enic);
1985
1986	del_timer_sync(&enic->notify_timer);
1987	enic_rfs_flw_tbl_free(enic);
1988
1989	enic_dev_disable(enic);
1990
1991	for (i = 0; i < enic->rq_count; i++)
1992		napi_disable(&enic->napi[i]);
1993
1994	netif_carrier_off(netdev);
1995	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
1996		for (i = 0; i < enic->wq_count; i++)
1997			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
1998	netif_tx_disable(netdev);
1999
2000	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
2001		enic_dev_del_station_addr(enic);
2002
2003	for (i = 0; i < enic->wq_count; i++) {
2004		err = vnic_wq_disable(&enic->wq[i]);
2005		if (err)
2006			return err;
2007	}
2008	for (i = 0; i < enic->rq_count; i++) {
2009		err = vnic_rq_disable(&enic->rq[i]);
2010		if (err)
2011			return err;
2012	}
2013
2014	enic_dev_notify_unset(enic);
2015	enic_unset_affinity_hint(enic);
2016	enic_free_intr(enic);
2017
2018	for (i = 0; i < enic->wq_count; i++)
2019		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
2020	for (i = 0; i < enic->rq_count; i++)
2021		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
2022	for (i = 0; i < enic->cq_count; i++)
2023		vnic_cq_clean(&enic->cq[i]);
2024	for (i = 0; i < enic->intr_count; i++)
2025		vnic_intr_clean(&enic->intr[i]);
2026
2027	return 0;
2028}
2029
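/* Apply a new MTU under RTNL by bouncing the interface: stop it if
 * running, update netdev->mtu, then re-open so receive buffers are
 * reallocated for the new length.
 */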
2030static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
2031{
2032	bool running = netif_running(netdev);
2033	int err = 0;
2034
2035	ASSERT_RTNL();
2036	if (running) {
2037		err = enic_stop(netdev);
2038		if (err)
2039			return err;
2040	}
2041
2042	netdev->mtu = new_mtu;
2043
2044	if (running) {
2045		err = enic_open(netdev);
2046		if (err)
2047			return err;
2048	}
2049
2050	return 0;
2051}
2052
2053static int enic_change_mtu(struct net_device *netdev, int new_mtu)
2054{
2055	struct enic *enic = netdev_priv(netdev);
2056
2057	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2058		return -EOPNOTSUPP;
2059
2060	if (netdev->mtu > enic->port_mtu)
2061		netdev_warn(netdev,
2062			    "interface MTU (%d) set higher than port MTU (%d)\n",
2063			    netdev->mtu, enic->port_mtu);
2064
2065	return _enic_change_mtu(netdev, new_mtu);
2066}
2067
2068static void enic_change_mtu_work(struct work_struct *work)
2069{
2070	struct enic *enic = container_of(work, struct enic, change_mtu_work);
2071	struct net_device *netdev = enic->netdev;
2072	int new_mtu = vnic_dev_mtu(enic->vdev);
2073
2074	rtnl_lock();
2075	(void)_enic_change_mtu(netdev, new_mtu);
2076	rtnl_unlock();
2077
2078	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
2079}
2080
2081#ifdef CONFIG_NET_POLL_CONTROLLER
2082static void enic_poll_controller(struct net_device *netdev)
2083{
2084	struct enic *enic = netdev_priv(netdev);
2085	struct vnic_dev *vdev = enic->vdev;
2086	unsigned int i, intr;
2087
2088	switch (vnic_dev_get_intr_mode(vdev)) {
2089	case VNIC_DEV_INTR_MODE_MSIX:
2090		for (i = 0; i < enic->rq_count; i++) {
2091			intr = enic_msix_rq_intr(enic, i);
2092			enic_isr_msix(enic->msix_entry[intr].vector,
2093				      &enic->napi[i]);
2094		}
2095
2096		for (i = 0; i < enic->wq_count; i++) {
2097			intr = enic_msix_wq_intr(enic, i);
2098			enic_isr_msix(enic->msix_entry[intr].vector,
2099				      &enic->napi[enic_cq_wq(enic, i)]);
2100		}
2101
2102		break;
2103	case VNIC_DEV_INTR_MODE_MSI:
2104		enic_isr_msi(enic->pdev->irq, enic);
2105		break;
2106	case VNIC_DEV_INTR_MODE_INTX:
2107		enic_isr_legacy(enic->pdev->irq, netdev);
2108		break;
2109	default:
2110		break;
2111	}
2112}
2113#endif
2114
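/* Kick off an asynchronous vNIC operation via start() and poll
 * finished() roughly every 100 ms, giving up after 2 seconds.
 */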
2115static int enic_dev_wait(struct vnic_dev *vdev,
2116	int (*start)(struct vnic_dev *, int),
2117	int (*finished)(struct vnic_dev *, int *),
2118	int arg)
2119{
2120	unsigned long time;
2121	int done;
2122	int err;
2123
2124	err = start(vdev, arg);
2125	if (err)
2126		return err;
2127
2128	/* Wait for func to complete...2 seconds max
2129	 */
2130
2131	time = jiffies + (HZ * 2);
2132	do {
2133
2134		err = finished(vdev, &done);
2135		if (err)
2136			return err;
2137
2138		if (done)
2139			return 0;
2140
2141		schedule_timeout_uninterruptible(HZ / 10);
2142
2143	} while (time_after(time, jiffies));
2144
2145	return -ETIMEDOUT;
2146}
2147
2148static int enic_dev_open(struct enic *enic)
2149{
2150	int err;
2151	u32 flags = CMD_OPENF_IG_DESCCACHE;
2152
2153	err = enic_dev_wait(enic->vdev, vnic_dev_open,
2154		vnic_dev_open_done, flags);
2155	if (err)
2156		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
2157			err);
2158
2159	return err;
2160}
2161
2162static int enic_dev_soft_reset(struct enic *enic)
2163{
2164	int err;
2165
2166	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
2167			    vnic_dev_soft_reset_done, 0);
2168	if (err)
2169		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
2170			   err);
2171
2172	return err;
2173}
2174
2175static int enic_dev_hang_reset(struct enic *enic)
2176{
2177	int err;
2178
2179	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
2180		vnic_dev_hang_reset_done, 0);
2181	if (err)
2182		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
2183			err);
2184
2185	return err;
2186}
2187
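/* Copy the RSS key into a DMA-coherent buffer, byte by byte across
 * the key sub-blocks, and hand it to firmware under the devcmd lock.
 */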
2188int __enic_set_rsskey(struct enic *enic)
2189{
2190	union vnic_rss_key *rss_key_buf_va;
2191	dma_addr_t rss_key_buf_pa;
2192	int i, kidx, bidx, err;
2193
2194	rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
2195					    sizeof(union vnic_rss_key),
2196					    &rss_key_buf_pa, GFP_ATOMIC);
2197	if (!rss_key_buf_va)
2198		return -ENOMEM;
2199
2200	for (i = 0; i < ENIC_RSS_LEN; i++) {
2201		kidx = i / ENIC_RSS_BYTES_PER_KEY;
2202		bidx = i % ENIC_RSS_BYTES_PER_KEY;
2203		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
2204	}
2205	spin_lock_bh(&enic->devcmd_lock);
2206	err = enic_set_rss_key(enic,
2207		rss_key_buf_pa,
2208		sizeof(union vnic_rss_key));
2209	spin_unlock_bh(&enic->devcmd_lock);
2210
2211	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
2212			  rss_key_buf_va, rss_key_buf_pa);
2213
2214	return err;
2215}
2216
2217static int enic_set_rsskey(struct enic *enic)
2218{
2219	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);
2220
2221	return __enic_set_rsskey(enic);
2222}
2223
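/* Build the RSS indirection table: spread the 2^rss_hash_bits hash
 * buckets round-robin across the enabled RQs and hand the table to
 * firmware under the devcmd lock.
 */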
2224static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
2225{
2226	dma_addr_t rss_cpu_buf_pa;
2227	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
2228	unsigned int i;
2229	int err;
2230
2231	rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
2232					    sizeof(union vnic_rss_cpu),
2233					    &rss_cpu_buf_pa, GFP_ATOMIC);
2234	if (!rss_cpu_buf_va)
2235		return -ENOMEM;
2236
2237	for (i = 0; i < (1 << rss_hash_bits); i++)
2238		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
2239
2240	spin_lock_bh(&enic->devcmd_lock);
2241	err = enic_set_rss_cpu(enic,
2242		rss_cpu_buf_pa,
2243		sizeof(union vnic_rss_cpu));
2244	spin_unlock_bh(&enic->devcmd_lock);
2245
2246	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
2247			  rss_cpu_buf_va, rss_cpu_buf_pa);
2248
2249	return err;
2250}
2251
2252static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
2253	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
2254{
2255	const u8 tso_ipid_split_en = 0;
2256	const u8 ig_vlan_strip_en = 1;
2257	int err;
2258
2259	/* Enable VLAN tag stripping.
2260	 */
2261
2262	spin_lock_bh(&enic->devcmd_lock);
2263	err = enic_set_nic_cfg(enic,
2264		rss_default_cpu, rss_hash_type,
2265		rss_hash_bits, rss_base_cpu,
2266		rss_enable, tso_ipid_split_en,
2267		ig_vlan_strip_en);
2268	spin_unlock_bh(&enic->devcmd_lock);
2269
2270	return err;
2271}
2272
2273static int enic_set_rss_nic_cfg(struct enic *enic)
2274{
2275	struct device *dev = enic_get_dev(enic);
2276	const u8 rss_default_cpu = 0;
2277	const u8 rss_hash_bits = 7;
2278	const u8 rss_base_cpu = 0;
2279	u8 rss_hash_type;
2280	int res;
2281	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
2282
2283	spin_lock_bh(&enic->devcmd_lock);
2284	res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
2285	spin_unlock_bh(&enic->devcmd_lock);
2286	if (res) {
2287		/* defaults for old adapters
2288		 */
2289		rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4	|
2290				NIC_CFG_RSS_HASH_TYPE_TCP_IPV4	|
2291				NIC_CFG_RSS_HASH_TYPE_IPV6	|
2292				NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
2293	}
2294
2295	if (rss_enable) {
2296		if (!enic_set_rsskey(enic)) {
2297			if (enic_set_rsscpu(enic, rss_hash_bits)) {
2298				rss_enable = 0;
2299				dev_warn(dev, "RSS disabled, failed to set "
2300					 "RSS cpu indirection table.\n");
2301			}
2302		} else {
2303			rss_enable = 0;
2304			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
2305		}
2306	}
2307
2308	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
2309		rss_hash_bits, rss_base_cpu, rss_enable);
2310}
2311
2312static void enic_set_api_busy(struct enic *enic, bool busy)
2313{
2314	spin_lock(&enic->enic_api_lock);
2315	enic->enic_api_busy = busy;
2316	spin_unlock(&enic->enic_api_lock);
2317}
2318
2319static void enic_reset(struct work_struct *work)
2320{
2321	struct enic *enic = container_of(work, struct enic, reset);
2322
2323	if (!netif_running(enic->netdev))
2324		return;
2325
2326	rtnl_lock();
2327
2328	/* Stop any activity from infiniband */
2329	enic_set_api_busy(enic, true);
2330
2331	enic_stop(enic->netdev);
2332	enic_dev_soft_reset(enic);
2333	enic_reset_addr_lists(enic);
2334	enic_init_vnic_resources(enic);
2335	enic_set_rss_nic_cfg(enic);
2336	enic_dev_set_ig_vlan_rewrite_mode(enic);
2337	enic_open(enic->netdev);
2338
2339	/* Allow infiniband to fiddle with the device again */
2340	enic_set_api_busy(enic, false);
2341
2342	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
2343
2344	rtnl_unlock();
2345}
2346
2347static void enic_tx_hang_reset(struct work_struct *work)
2348{
2349	struct enic *enic = container_of(work, struct enic, tx_hang_reset);
2350
2351	rtnl_lock();
2352
2353	/* Stop any activity from infiniband */
2354	enic_set_api_busy(enic, true);
2355
2356	enic_dev_hang_notify(enic);
2357	enic_stop(enic->netdev);
2358	enic_dev_hang_reset(enic);
2359	enic_reset_addr_lists(enic);
2360	enic_init_vnic_resources(enic);
2361	enic_set_rss_nic_cfg(enic);
2362	enic_dev_set_ig_vlan_rewrite_mode(enic);
2363	enic_open(enic->netdev);
2364
2365	/* Allow infiniband to fiddle with the device again */
2366	enic_set_api_busy(enic, false);
2367
2368	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
2369
2370	rtnl_unlock();
2371}
2372
2373static int enic_set_intr_mode(struct enic *enic)
2374{
2375	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2376	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2377	unsigned int i;
2378
2379	/* Set interrupt mode (INTx, MSI, MSI-X) depending
2380	 * on system capabilities.
2381	 *
2382	 * Try MSI-X first
2383	 *
2384	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
2385	 * (the second to last INTR is used for WQ/RQ errors)
2386	 * (the last INTR is used for notifications)
2387	 */
2388
2389	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2390	for (i = 0; i < n + m + 2; i++)
2391		enic->msix_entry[i].entry = i;
2392
2393	/* Use multiple RQs if RSS is enabled
2394	 */
2395
2396	if (ENIC_SETTING(enic, RSS) &&
2397	    enic->config.intr_mode < 1 &&
2398	    enic->rq_count >= n &&
2399	    enic->wq_count >= m &&
2400	    enic->cq_count >= n + m &&
2401	    enic->intr_count >= n + m + 2) {
2402
2403		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2404					  n + m + 2, n + m + 2) > 0) {
2405
2406			enic->rq_count = n;
2407			enic->wq_count = m;
2408			enic->cq_count = n + m;
2409			enic->intr_count = n + m + 2;
2410
2411			vnic_dev_set_intr_mode(enic->vdev,
2412				VNIC_DEV_INTR_MODE_MSIX);
2413
2414			return 0;
2415		}
2416	}
2417
2418	if (enic->config.intr_mode < 1 &&
2419	    enic->rq_count >= 1 &&
2420	    enic->wq_count >= m &&
2421	    enic->cq_count >= 1 + m &&
2422	    enic->intr_count >= 1 + m + 2) {
2423		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
2424					  1 + m + 2, 1 + m + 2) > 0) {
2425
2426			enic->rq_count = 1;
2427			enic->wq_count = m;
2428			enic->cq_count = 1 + m;
2429			enic->intr_count = 1 + m + 2;
2430
2431			vnic_dev_set_intr_mode(enic->vdev,
2432				VNIC_DEV_INTR_MODE_MSIX);
2433
2434			return 0;
2435		}
2436	}
2437
2438	/* Next try MSI
2439	 *
2440	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2441	 */
2442
2443	if (enic->config.intr_mode < 2 &&
2444	    enic->rq_count >= 1 &&
2445	    enic->wq_count >= 1 &&
2446	    enic->cq_count >= 2 &&
2447	    enic->intr_count >= 1 &&
2448	    !pci_enable_msi(enic->pdev)) {
2449
2450		enic->rq_count = 1;
2451		enic->wq_count = 1;
2452		enic->cq_count = 2;
2453		enic->intr_count = 1;
2454
2455		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2456
2457		return 0;
2458	}
2459
2460	/* Next try INTx
2461	 *
2462	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2463	 * (the first INTR is used for WQ/RQ)
2464	 * (the second INTR is used for WQ/RQ errors)
2465	 * (the last INTR is used for notifications)
2466	 */
2467
2468	if (enic->config.intr_mode < 3 &&
2469	    enic->rq_count >= 1 &&
2470	    enic->wq_count >= 1 &&
2471	    enic->cq_count >= 2 &&
2472	    enic->intr_count >= 3) {
2473
2474		enic->rq_count = 1;
2475		enic->wq_count = 1;
2476		enic->cq_count = 2;
2477		enic->intr_count = 3;
2478
2479		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2480
2481		return 0;
2482	}
2483
2484	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2485
2486	return -EINVAL;
2487}
2488
2489static void enic_clear_intr_mode(struct enic *enic)
2490{
2491	switch (vnic_dev_get_intr_mode(enic->vdev)) {
2492	case VNIC_DEV_INTR_MODE_MSIX:
2493		pci_disable_msix(enic->pdev);
2494		break;
2495	case VNIC_DEV_INTR_MODE_MSI:
2496		pci_disable_msi(enic->pdev);
2497		break;
2498	default:
2499		break;
2500	}
2501
2502	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2503}
2504
2505static const struct net_device_ops enic_netdev_dynamic_ops = {
2506	.ndo_open		= enic_open,
2507	.ndo_stop		= enic_stop,
2508	.ndo_start_xmit		= enic_hard_start_xmit,
2509	.ndo_get_stats64	= enic_get_stats,
2510	.ndo_validate_addr	= eth_validate_addr,
2511	.ndo_set_rx_mode	= enic_set_rx_mode,
2512	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
2513	.ndo_change_mtu		= enic_change_mtu,
2514	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
2515	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
2516	.ndo_tx_timeout		= enic_tx_timeout,
2517	.ndo_set_vf_port	= enic_set_vf_port,
2518	.ndo_get_vf_port	= enic_get_vf_port,
2519	.ndo_set_vf_mac		= enic_set_vf_mac,
2520#ifdef CONFIG_NET_POLL_CONTROLLER
2521	.ndo_poll_controller	= enic_poll_controller,
2522#endif
2523#ifdef CONFIG_RFS_ACCEL
2524	.ndo_rx_flow_steer	= enic_rx_flow_steer,
2525#endif
2526	.ndo_features_check	= enic_features_check,
2527};
2528
2529static const struct net_device_ops enic_netdev_ops = {
2530	.ndo_open		= enic_open,
2531	.ndo_stop		= enic_stop,
2532	.ndo_start_xmit		= enic_hard_start_xmit,
2533	.ndo_get_stats64	= enic_get_stats,
2534	.ndo_validate_addr	= eth_validate_addr,
2535	.ndo_set_mac_address	= enic_set_mac_address,
2536	.ndo_set_rx_mode	= enic_set_rx_mode,
2537	.ndo_change_mtu		= enic_change_mtu,
2538	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
2539	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
2540	.ndo_tx_timeout		= enic_tx_timeout,
2541	.ndo_set_vf_port	= enic_set_vf_port,
2542	.ndo_get_vf_port	= enic_get_vf_port,
2543	.ndo_set_vf_mac		= enic_set_vf_mac,
2544#ifdef CONFIG_NET_POLL_CONTROLLER
2545	.ndo_poll_controller	= enic_poll_controller,
2546#endif
2547#ifdef CONFIG_RFS_ACCEL
2548	.ndo_rx_flow_steer	= enic_rx_flow_steer,
2549#endif
2550	.ndo_features_check	= enic_features_check,
2551};
2552
2553static void enic_dev_deinit(struct enic *enic)
2554{
2555	unsigned int i;
2556
2557	for (i = 0; i < enic->rq_count; i++)
2558		__netif_napi_del(&enic->napi[i]);
2559
2560	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
2561		for (i = 0; i < enic->wq_count; i++)
2562			__netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
2563
2564	/* observe RCU grace period after __netif_napi_del() calls */
2565	synchronize_net();
2566
2567	enic_free_vnic_resources(enic);
2568	enic_clear_intr_mode(enic);
2569	enic_free_affinity_hint(enic);
2570}
2571
2572static void enic_kdump_kernel_config(struct enic *enic)
2573{
2574	if (is_kdump_kernel()) {
2575		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
2576		enic->rq_count = 1;
2577		enic->wq_count = 1;
2578		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
2579		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
2580		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
2581	}
2582}
2583
2584static int enic_dev_init(struct enic *enic)
2585{
2586	struct device *dev = enic_get_dev(enic);
2587	struct net_device *netdev = enic->netdev;
2588	unsigned int i;
2589	int err;
2590
2591	/* Get interrupt coalesce timer info */
2592	err = enic_dev_intr_coal_timer_info(enic);
2593	if (err) {
2594		dev_warn(dev, "Using default conversion factor for "
2595			"interrupt coalesce timer\n");
2596		vnic_dev_intr_coal_timer_info_default(enic->vdev);
2597	}
2598
2599	/* Get vNIC configuration
2600	 */
2601
2602	err = enic_get_vnic_config(enic);
2603	if (err) {
2604		dev_err(dev, "Get vNIC configuration failed, aborting\n");
2605		return err;
2606	}
2607
2608	/* Get available resource counts
2609	 */
2610
2611	enic_get_res_counts(enic);
2612
2613	/* modify resource count if we are in kdump_kernel
2614	 */
2615	enic_kdump_kernel_config(enic);
2616
2617	/* Set interrupt mode based on resource counts and system
2618	 * capabilities
2619	 */
2620
2621	err = enic_set_intr_mode(enic);
2622	if (err) {
2623		dev_err(dev, "Failed to set intr mode based on resource "
2624			"counts and system capabilities, aborting\n");
2625		return err;
2626	}
2627
2628	/* Allocate and configure vNIC resources
2629	 */
2630
2631	err = enic_alloc_vnic_resources(enic);
2632	if (err) {
2633		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
2634		goto err_out_free_vnic_resources;
2635	}
2636
2637	enic_init_vnic_resources(enic);
2638
2639	err = enic_set_rss_nic_cfg(enic);
2640	if (err) {
2641		dev_err(dev, "Failed to config nic, aborting\n");
2642		goto err_out_free_vnic_resources;
2643	}
2644
2645	switch (vnic_dev_get_intr_mode(enic->vdev)) {
2646	default:
2647		netif_napi_add(netdev, &enic->napi[0], enic_poll);
2648		break;
2649	case VNIC_DEV_INTR_MODE_MSIX:
2650		for (i = 0; i < enic->rq_count; i++) {
2651			netif_napi_add(netdev, &enic->napi[i],
2652				       enic_poll_msix_rq);
2653		}
2654		for (i = 0; i < enic->wq_count; i++)
2655			netif_napi_add(netdev,
2656				       &enic->napi[enic_cq_wq(enic, i)],
2657				       enic_poll_msix_wq);
2658		break;
2659	}
2660
2661	return 0;
2662
2663err_out_free_vnic_resources:
2664	enic_free_affinity_hint(enic);
2665	enic_clear_intr_mode(enic);
2666	enic_free_vnic_resources(enic);
2667
2668	return err;
2669}
2670
2671static void enic_iounmap(struct enic *enic)
2672{
2673	unsigned int i;
2674
2675	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2676		if (enic->bar[i].vaddr)
2677			iounmap(enic->bar[i].vaddr);
2678}
2679
2680static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2681{
2682	struct device *dev = &pdev->dev;
2683	struct net_device *netdev;
2684	struct enic *enic;
2685	int using_dac = 0;
2686	unsigned int i;
2687	int err;
2688#ifdef CONFIG_PCI_IOV
2689	int pos = 0;
2690#endif
2691	int num_pps = 1;
2692
2693	/* Allocate net device structure and initialize.  Private
2694	 * instance data is initialized to zero.
2695	 */
2696
2697	netdev = alloc_etherdev_mqs(sizeof(struct enic),
2698				    ENIC_RQ_MAX, ENIC_WQ_MAX);
2699	if (!netdev)
2700		return -ENOMEM;
2701
2702	pci_set_drvdata(pdev, netdev);
2703
2704	SET_NETDEV_DEV(netdev, &pdev->dev);
2705
2706	enic = netdev_priv(netdev);
2707	enic->netdev = netdev;
2708	enic->pdev = pdev;
2709
2710	/* Setup PCI resources
2711	 */
2712
2713	err = pci_enable_device_mem(pdev);
2714	if (err) {
2715		dev_err(dev, "Cannot enable PCI device, aborting\n");
2716		goto err_out_free_netdev;
2717	}
2718
2719	err = pci_request_regions(pdev, DRV_NAME);
2720	if (err) {
2721		dev_err(dev, "Cannot request PCI regions, aborting\n");
2722		goto err_out_disable_device;
2723	}
2724
2725	pci_set_master(pdev);
2726
2727	/* Query PCI controller on system for DMA addressing
2728	 * limitation for the device.  Try 47-bit first, and
2729	 * fall back to 32-bit.
2730	 */
2731
2732	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
2733	if (err) {
2734		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2735		if (err) {
2736			dev_err(dev, "No usable DMA configuration, aborting\n");
2737			goto err_out_release_regions;
2738		}
2739	} else {
2740		using_dac = 1;
2741	}
2742
2743	/* Map vNIC resources from BAR0-5
2744	 */
2745
2746	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2747		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2748			continue;
2749		enic->bar[i].len = pci_resource_len(pdev, i);
2750		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2751		if (!enic->bar[i].vaddr) {
2752			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
2753			err = -ENODEV;
2754			goto err_out_iounmap;
2755		}
2756		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
2757	}
2758
2759	/* Register vNIC device
2760	 */
2761
2762	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2763		ARRAY_SIZE(enic->bar));
2764	if (!enic->vdev) {
2765		dev_err(dev, "vNIC registration failed, aborting\n");
2766		err = -ENODEV;
2767		goto err_out_iounmap;
2768	}
2769
2770	err = vnic_devcmd_init(enic->vdev);
2771
2772	if (err)
2773		goto err_out_vnic_unregister;
2774
#ifdef CONFIG_PCI_IOV
	/* Get the number of SR-IOV VFs (sub-vNICs) the device exposes */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting. pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/* Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

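	/* Publish the queue counts the device actually provisioned;
	 * alloc_etherdev_mqs() above only reserved the worst case
	 * (ENIC_WQ_MAX / ENIC_RQ_MAX).
	 */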
	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	timer_setup(&enic->notify_timer, enic_notify_timer, 0);

	enic_rfs_flw_tbl_init(enic);
	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* The adaptive rx coalesce range was already initialized by
	 * enic_set_rx_coal_setting() above; this fixed value is used
	 * when adaptive coalescing is turned off.
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

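	/* Assemble the feature set: bits added to hw_features are user-
	 * toggleable via ethtool, and whatever is later copied into
	 * netdev->features becomes the set enabled by default.
	 */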
	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (ENIC_SETTING(enic, VXLAN)) {
		u64 patch_level;
		u64 a1 = 0;

		netdev->hw_enc_features |= NETIF_F_RXCSUM		|
					   NETIF_F_TSO			|
					   NETIF_F_TSO6			|
					   NETIF_F_TSO_ECN		|
					   NETIF_F_GSO_UDP_TUNNEL	|
					   NETIF_F_HW_CSUM		|
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_features |= netdev->hw_enc_features;
		/* get bit mask from hw about supported offload bit level
		 * BIT(0) = fw supports patch_level 0
		 *	    fcoe bit = encap
		 *	    fcoe_fc_crc_ok = outer csum ok
		 * BIT(1) = always set by fw
		 * BIT(2) = fw supports patch_level 2
		 *	    BIT(0) in rss_hash = encap
		 *	    BIT(1,2) in rss_hash = outer_ip_csum_ok/
		 *				   outer_tcp_csum_ok
		 * used in enic_rq_indicate_buf
		 */
		err = vnic_dev_get_supported_feature_ver(enic->vdev,
							 VIC_FEATURE_VXLAN,
							 &patch_level, &a1);
		if (err)
			patch_level = 0;
		enic->vxlan.flags = (u8)a1;
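		/* fls() returns the 1-based index of the highest set bit,
		 * so the math below selects the highest mutually supported
		 * patch level.  Worked example: firmware advertising
		 * BIT(0) | BIT(1) | BIT(2) is masked to 0x5, fls(0x5) = 3,
		 * and patch_level becomes 2; BIT(0) | BIT(1) masks to 0x1
		 * and yields level 0.  After masking, only the low bits
		 * remain, so passing the u64 to fls() is harmless.
		 */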
		/* mask bits that are supported by driver
		 */
		patch_level &= BIT_ULL(0) | BIT_ULL(2);
		patch_level = fls(patch_level);
		patch_level = patch_level ? patch_level - 1 : 0;
		enic->vxlan.patch_level = patch_level;

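		/* Advertise UDP tunnel offload only where it is usable:
		 * either the device has a single WQ, or firmware reports it
		 * can apply the offload across multiple WQs.  The table that
		 * also handles IPv6 outer headers additionally requires the
		 * OUTER_IPV6 flag.
		 */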
		if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
		    enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
			netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
			if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
				netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
		}
	}

	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9000 */
	netdev->min_mtu = ENIC_MIN_MTU;
	netdev->max_mtu = ENIC_MAX_MTU;
	netdev->mtu	= enic->port_mtu;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
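	/* Receive copybreak threshold: the receive path copies completions
	 * no longer than enic->rx_copybreak into a freshly allocated skb
	 * so the original ring buffer can be reposted immediately.
	 */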
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

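/* Error unwinding: each label releases only what was acquired after the
 * previous goto site, so a failed probe tears down in exact reverse order
 * of setup.  Note that err_out_disable_sriov frees the port-profile array
 * and then falls through to the SR-IOV disable.
 */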
err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
#endif
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}

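/* Device removal mirrors the probe error path: deferred work is cancelled
 * first so a queued reset or MTU change cannot race with teardown, then
 * resources are released in reverse order of acquisition.
 */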
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};

module_pci_driver(enic_driver);