   1// SPDX-License-Identifier: GPL-2.0+
   2// Copyright (c) 2016-2017 Hisilicon Limited.
   3
   4#include <linux/dma-mapping.h>
   5#include <linux/etherdevice.h>
   6#include <linux/interrupt.h>
   7#ifdef CONFIG_RFS_ACCEL
   8#include <linux/cpu_rmap.h>
   9#endif
  10#include <linux/if_vlan.h>
  11#include <linux/ip.h>
  12#include <linux/ipv6.h>
  13#include <linux/module.h>
  14#include <linux/pci.h>
  15#include <linux/aer.h>
  16#include <linux/skbuff.h>
  17#include <linux/sctp.h>
  18#include <linux/vermagic.h>
  19#include <net/gre.h>
  20#include <net/ip6_checksum.h>
  21#include <net/pkt_cls.h>
  22#include <net/tcp.h>
  23#include <net/vxlan.h>
  24
  25#include "hnae3.h"
  26#include "hns3_enet.h"
  27
  28#define hns3_set_field(origin, shift, val)	((origin) |= ((val) << (shift)))
  29#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
  30
  31#define hns3_rl_err(fmt, ...)						\
  32	do {								\
  33		if (net_ratelimit())					\
  34			netdev_err(fmt, ##__VA_ARGS__);			\
  35	} while (0)
  36
  37static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
  38static void hns3_remove_hw_addr(struct net_device *netdev);
  39
  40static const char hns3_driver_name[] = "hns3";
  41const char hns3_driver_version[] = VERMAGIC_STRING;
  42static const char hns3_driver_string[] =
  43			"Hisilicon Ethernet Network Driver for Hip08 Family";
  44static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
  45static struct hnae3_client client;
  46
  47static int debug = -1;
  48module_param(debug, int, 0);
  49MODULE_PARM_DESC(debug, " Network interface message level setting");
  50
  51#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
  52			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
  53
  54#define HNS3_INNER_VLAN_TAG	1
  55#define HNS3_OUTER_VLAN_TAG	2
  56
  57/* hns3_pci_tbl - PCI Device ID Table
  58 *
  59 * Last entry must be all 0s
  60 *
  61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  62 *   Class, Class Mask, private data (not used) }
  63 */
  64static const struct pci_device_id hns3_pci_tbl[] = {
  65	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
  66	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
  67	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
  68	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  69	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
  70	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  71	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
  72	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  73	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
  74	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  75	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
  76	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  77	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
  78	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
  79	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  80	/* required last entry */
  81	{0, }
  82};
  83MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
  84
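     /* Per-vector interrupt handler: nothing is done in hard-IRQ context
      * except scheduling the vector's NAPI instance.
      */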
  85static irqreturn_t hns3_irq_handle(int irq, void *vector)
  86{
  87	struct hns3_enet_tqp_vector *tqp_vector = vector;
  88
  89	napi_schedule_irqoff(&tqp_vector->napi);
  90
  91	return IRQ_HANDLED;
  92}
  93
  94static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
  95{
  96	struct hns3_enet_tqp_vector *tqp_vectors;
  97	unsigned int i;
  98
  99	for (i = 0; i < priv->vector_num; i++) {
 100		tqp_vectors = &priv->tqp_vector[i];
 101
 102		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
 103			continue;
 104
  105		/* clear the affinity hint */
 106		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
 107
 108		/* release the irq resource */
 109		free_irq(tqp_vectors->vector_irq, tqp_vectors);
 110		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
 111	}
 112}
 113
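     /* Request an IRQ for every initialized TQP vector, name it after the
      * netdev and its ring type (TxRx/Rx/Tx), and set the vector's CPU
      * affinity hint.
      */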
 114static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
 115{
 116	struct hns3_enet_tqp_vector *tqp_vectors;
 117	int txrx_int_idx = 0;
 118	int rx_int_idx = 0;
 119	int tx_int_idx = 0;
 120	unsigned int i;
 121	int ret;
 122
 123	for (i = 0; i < priv->vector_num; i++) {
 124		tqp_vectors = &priv->tqp_vector[i];
 125
 126		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
 127			continue;
 128
 129		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
 130			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 131				 "%s-%s-%d", priv->netdev->name, "TxRx",
 132				 txrx_int_idx++);
 133			txrx_int_idx++;
 134		} else if (tqp_vectors->rx_group.ring) {
 135			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 136				 "%s-%s-%d", priv->netdev->name, "Rx",
 137				 rx_int_idx++);
 138		} else if (tqp_vectors->tx_group.ring) {
 139			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 140				 "%s-%s-%d", priv->netdev->name, "Tx",
 141				 tx_int_idx++);
 142		} else {
 143			/* Skip this unused q_vector */
 144			continue;
 145		}
 146
 147		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
 148
 149		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
 150				  tqp_vectors->name, tqp_vectors);
 151		if (ret) {
 152			netdev_err(priv->netdev, "request irq(%d) fail\n",
 153				   tqp_vectors->vector_irq);
 154			hns3_nic_uninit_irq(priv);
 155			return ret;
 156		}
 157
 158		irq_set_affinity_hint(tqp_vectors->vector_irq,
 159				      &tqp_vectors->affinity_mask);
 160
 161		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
 162	}
 163
 164	return 0;
 165}
 166
 167static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
 168				 u32 mask_en)
 169{
 170	writel(mask_en, tqp_vector->mask_addr);
 171}
 172
 173static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
 174{
 175	napi_enable(&tqp_vector->napi);
 176
 177	/* enable vector */
 178	hns3_mask_vector_irq(tqp_vector, 1);
 179}
 180
 181static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
 182{
 183	/* disable vector */
 184	hns3_mask_vector_irq(tqp_vector, 0);
 185
 186	disable_irq(tqp_vector->vector_irq);
 187	napi_disable(&tqp_vector->napi);
 188}
 189
 190void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
 191				 u32 rl_value)
 192{
 193	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
 194
  195	/* This defines the configuration for RL (Interrupt Rate Limiter).
  196	 * RL defines the rate of interrupts, i.e. the number of interrupts
  197	 * per second. GL and RL are two ways to achieve interrupt coalescing.
  198	 */
 199
 200	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
 201	    !tqp_vector->rx_group.coal.gl_adapt_enable)
 202		/* According to the hardware, the range of rl_reg is
 203		 * 0-59 and the unit is 4.
 204		 */
 205		rl_reg |=  HNS3_INT_RL_ENABLE_MASK;
 206
 207	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
 208}
 209
 210void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
 211				    u32 gl_value)
 212{
 213	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
 214
 215	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
 216}
 217
 218void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
 219				    u32 gl_value)
 220{
 221	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
 222
 223	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
 224}
 225
 226static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
 227				   struct hns3_nic_priv *priv)
 228{
 229	/* initialize the configuration for interrupt coalescing.
 230	 * 1. GL (Interrupt Gap Limiter)
 231	 * 2. RL (Interrupt Rate Limiter)
 232	 *
 233	 * Default: enable interrupt coalescing self-adaptive and GL
 234	 */
 235	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
 236	tqp_vector->rx_group.coal.gl_adapt_enable = 1;
 237
 238	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
 239	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
 240
 241	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
 242	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
 243}
 244
 245static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
 246				      struct hns3_nic_priv *priv)
 247{
 248	struct hnae3_handle *h = priv->ae_handle;
 249
 250	hns3_set_vector_coalesce_tx_gl(tqp_vector,
 251				       tqp_vector->tx_group.coal.int_gl);
 252	hns3_set_vector_coalesce_rx_gl(tqp_vector,
 253				       tqp_vector->rx_group.coal.int_gl);
 254	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
 255}
 256
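     /* Propagate the TC and queue layout from the ae handle to the stack:
      * program the netdev TC-to-queue mapping and set the real number of
      * TX/RX queues (rss_size * num_tc).
      */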
 257static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 258{
 259	struct hnae3_handle *h = hns3_get_handle(netdev);
 260	struct hnae3_knic_private_info *kinfo = &h->kinfo;
 261	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
 262	int i, ret;
 263
 264	if (kinfo->num_tc <= 1) {
 265		netdev_reset_tc(netdev);
 266	} else {
 267		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
 268		if (ret) {
 269			netdev_err(netdev,
 270				   "netdev_set_num_tc fail, ret=%d!\n", ret);
 271			return ret;
 272		}
 273
 274		for (i = 0; i < HNAE3_MAX_TC; i++) {
 275			if (!kinfo->tc_info[i].enable)
 276				continue;
 277
 278			netdev_set_tc_queue(netdev,
 279					    kinfo->tc_info[i].tc,
 280					    kinfo->tc_info[i].tqp_count,
 281					    kinfo->tc_info[i].tqp_offset);
 282		}
 283	}
 284
 285	ret = netif_set_real_num_tx_queues(netdev, queue_size);
 286	if (ret) {
 287		netdev_err(netdev,
 288			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
 289		return ret;
 290	}
 291
 292	ret = netif_set_real_num_rx_queues(netdev, queue_size);
 293	if (ret) {
 294		netdev_err(netdev,
 295			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
 296		return ret;
 297	}
 298
 299	return 0;
 300}
 301
 302static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
 303{
 304	u16 alloc_tqps, max_rss_size, rss_size;
 305
 306	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
 307	rss_size = alloc_tqps / h->kinfo.num_tc;
 308
 309	return min_t(u16, rss_size, max_rss_size);
 310}
 311
 312static void hns3_tqp_enable(struct hnae3_queue *tqp)
 313{
 314	u32 rcb_reg;
 315
 316	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
 317	rcb_reg |= BIT(HNS3_RING_EN_B);
 318	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
 319}
 320
 321static void hns3_tqp_disable(struct hnae3_queue *tqp)
 322{
 323	u32 rcb_reg;
 324
 325	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
 326	rcb_reg &= ~BIT(HNS3_RING_EN_B);
 327	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
 328}
 329
 330static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
 331{
 332#ifdef CONFIG_RFS_ACCEL
 333	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
 334	netdev->rx_cpu_rmap = NULL;
 335#endif
 336}
 337
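     /* Build the aRFS CPU reverse map (CONFIG_RFS_ACCEL only): one rmap entry
      * per TQP vector IRQ, so received flows can be steered to the CPU that
      * owns the interrupt.
      */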
 338static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
 339{
 340#ifdef CONFIG_RFS_ACCEL
 341	struct hns3_nic_priv *priv = netdev_priv(netdev);
 342	struct hns3_enet_tqp_vector *tqp_vector;
 343	int i, ret;
 344
 345	if (!netdev->rx_cpu_rmap) {
 346		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
 347		if (!netdev->rx_cpu_rmap)
 348			return -ENOMEM;
 349	}
 350
 351	for (i = 0; i < priv->vector_num; i++) {
 352		tqp_vector = &priv->tqp_vector[i];
 353		ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
 354				       tqp_vector->vector_irq);
 355		if (ret) {
 356			hns3_free_rx_cpu_rmap(netdev);
 357			return ret;
 358		}
 359	}
 360#endif
 361	return 0;
 362}
 363
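     /* Bring the data path up: reset the rings, set up the aRFS rmap and the
      * vector IRQs, enable NAPI/interrupts and the RCB queues, then start the
      * ae_dev. Errors unwind in reverse order.
      */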
 364static int hns3_nic_net_up(struct net_device *netdev)
 365{
 366	struct hns3_nic_priv *priv = netdev_priv(netdev);
 367	struct hnae3_handle *h = priv->ae_handle;
 368	int i, j;
 369	int ret;
 370
 371	ret = hns3_nic_reset_all_ring(h);
 372	if (ret)
 373		return ret;
 374
 375	/* the device can work without cpu rmap, only aRFS needs it */
 376	ret = hns3_set_rx_cpu_rmap(netdev);
 377	if (ret)
 378		netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
 379
 380	/* get irq resource for all vectors */
 381	ret = hns3_nic_init_irq(priv);
 382	if (ret) {
 383		netdev_err(netdev, "init irq failed! ret=%d\n", ret);
 384		goto free_rmap;
 385	}
 386
 387	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 388
 389	/* enable the vectors */
 390	for (i = 0; i < priv->vector_num; i++)
 391		hns3_vector_enable(&priv->tqp_vector[i]);
 392
 393	/* enable rcb */
 394	for (j = 0; j < h->kinfo.num_tqps; j++)
 395		hns3_tqp_enable(h->kinfo.tqp[j]);
 396
 397	/* start the ae_dev */
 398	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
 399	if (ret)
 400		goto out_start_err;
 401
 402	return 0;
 403
 404out_start_err:
 405	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 406	while (j--)
 407		hns3_tqp_disable(h->kinfo.tqp[j]);
 408
 409	for (j = i - 1; j >= 0; j--)
 410		hns3_vector_disable(&priv->tqp_vector[j]);
 411
 412	hns3_nic_uninit_irq(priv);
 413free_rmap:
 414	hns3_free_rx_cpu_rmap(netdev);
 415	return ret;
 416}
 417
 418static void hns3_config_xps(struct hns3_nic_priv *priv)
 419{
 420	int i;
 421
 422	for (i = 0; i < priv->vector_num; i++) {
 423		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
 424		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
 425
 426		while (ring) {
 427			int ret;
 428
 429			ret = netif_set_xps_queue(priv->netdev,
 430						  &tqp_vector->affinity_mask,
 431						  ring->tqp->tqp_index);
 432			if (ret)
 433				netdev_warn(priv->netdev,
 434					    "set xps queue failed: %d", ret);
 435
 436			ring = ring->next;
 437		}
 438	}
 439}
 440
 441static int hns3_nic_net_open(struct net_device *netdev)
 442{
 443	struct hns3_nic_priv *priv = netdev_priv(netdev);
 444	struct hnae3_handle *h = hns3_get_handle(netdev);
 445	struct hnae3_knic_private_info *kinfo;
 446	int i, ret;
 447
 448	if (hns3_nic_resetting(netdev))
 449		return -EBUSY;
 450
 451	netif_carrier_off(netdev);
 452
 453	ret = hns3_nic_set_real_num_queue(netdev);
 454	if (ret)
 455		return ret;
 456
 457	ret = hns3_nic_net_up(netdev);
 458	if (ret) {
 459		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
 460		return ret;
 461	}
 462
 463	kinfo = &h->kinfo;
 464	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
 465		netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
 466
 467	if (h->ae_algo->ops->set_timer_task)
 468		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
 469
 470	hns3_config_xps(priv);
 471
 472	netif_dbg(h, drv, netdev, "net open\n");
 473
 474	return 0;
 475}
 476
 477static void hns3_reset_tx_queue(struct hnae3_handle *h)
 478{
 479	struct net_device *ndev = h->kinfo.netdev;
 480	struct hns3_nic_priv *priv = netdev_priv(ndev);
 481	struct netdev_queue *dev_queue;
 482	u32 i;
 483
 484	for (i = 0; i < h->kinfo.num_tqps; i++) {
 485		dev_queue = netdev_get_tx_queue(ndev,
 486						priv->ring_data[i].queue_index);
 487		netdev_tx_reset_queue(dev_queue);
 488	}
 489}
 490
 491static void hns3_nic_net_down(struct net_device *netdev)
 492{
 493	struct hns3_nic_priv *priv = netdev_priv(netdev);
 494	struct hnae3_handle *h = hns3_get_handle(netdev);
 495	const struct hnae3_ae_ops *ops;
 496	int i;
 497
 498	/* disable vectors */
 499	for (i = 0; i < priv->vector_num; i++)
 500		hns3_vector_disable(&priv->tqp_vector[i]);
 501
 502	/* disable rcb */
 503	for (i = 0; i < h->kinfo.num_tqps; i++)
 504		hns3_tqp_disable(h->kinfo.tqp[i]);
 505
 506	/* stop ae_dev */
 507	ops = priv->ae_handle->ae_algo->ops;
 508	if (ops->stop)
 509		ops->stop(priv->ae_handle);
 510
 511	hns3_free_rx_cpu_rmap(netdev);
 512
 513	/* free irq resources */
 514	hns3_nic_uninit_irq(priv);
 515
 516	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
  517	 * during the reset process, because the driver may not be able
 518	 * to disable the ring through firmware when downing the netdev.
 519	 */
 520	if (!hns3_nic_resetting(netdev))
 521		hns3_clear_all_ring(priv->ae_handle, false);
 522
 523	hns3_reset_tx_queue(priv->ae_handle);
 524}
 525
 526static int hns3_nic_net_stop(struct net_device *netdev)
 527{
 528	struct hns3_nic_priv *priv = netdev_priv(netdev);
 529	struct hnae3_handle *h = hns3_get_handle(netdev);
 530
 531	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
 532		return 0;
 533
 534	netif_dbg(h, drv, netdev, "net stop\n");
 535
 536	if (h->ae_algo->ops->set_timer_task)
 537		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
 538
 539	netif_tx_stop_all_queues(netdev);
 540	netif_carrier_off(netdev);
 541
 542	hns3_nic_net_down(netdev);
 543
 544	return 0;
 545}
 546
 547static int hns3_nic_uc_sync(struct net_device *netdev,
 548			    const unsigned char *addr)
 549{
 550	struct hnae3_handle *h = hns3_get_handle(netdev);
 551
 552	if (h->ae_algo->ops->add_uc_addr)
 553		return h->ae_algo->ops->add_uc_addr(h, addr);
 554
 555	return 0;
 556}
 557
 558static int hns3_nic_uc_unsync(struct net_device *netdev,
 559			      const unsigned char *addr)
 560{
 561	struct hnae3_handle *h = hns3_get_handle(netdev);
 562
 563	if (h->ae_algo->ops->rm_uc_addr)
 564		return h->ae_algo->ops->rm_uc_addr(h, addr);
 565
 566	return 0;
 567}
 568
 569static int hns3_nic_mc_sync(struct net_device *netdev,
 570			    const unsigned char *addr)
 571{
 572	struct hnae3_handle *h = hns3_get_handle(netdev);
 573
 574	if (h->ae_algo->ops->add_mc_addr)
 575		return h->ae_algo->ops->add_mc_addr(h, addr);
 576
 577	return 0;
 578}
 579
 580static int hns3_nic_mc_unsync(struct net_device *netdev,
 581			      const unsigned char *addr)
 582{
 583	struct hnae3_handle *h = hns3_get_handle(netdev);
 584
 585	if (h->ae_algo->ops->rm_mc_addr)
 586		return h->ae_algo->ops->rm_mc_addr(h, addr);
 587
 588	return 0;
 589}
 590
 591static u8 hns3_get_netdev_flags(struct net_device *netdev)
 592{
 593	u8 flags = 0;
 594
 595	if (netdev->flags & IFF_PROMISC) {
 596		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
 597	} else {
 598		flags |= HNAE3_VLAN_FLTR;
 599		if (netdev->flags & IFF_ALLMULTI)
 600			flags |= HNAE3_USER_MPE;
 601	}
 602
 603	return flags;
 604}
 605
 606static void hns3_nic_set_rx_mode(struct net_device *netdev)
 607{
 608	struct hnae3_handle *h = hns3_get_handle(netdev);
 609	u8 new_flags;
 610	int ret;
 611
 612	new_flags = hns3_get_netdev_flags(netdev);
 613
 614	ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
 615	if (ret) {
 616		netdev_err(netdev, "sync uc address fail\n");
 617		if (ret == -ENOSPC)
 618			new_flags |= HNAE3_OVERFLOW_UPE;
 619	}
 620
 621	if (netdev->flags & IFF_MULTICAST) {
 622		ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
 623				    hns3_nic_mc_unsync);
 624		if (ret) {
 625			netdev_err(netdev, "sync mc address fail\n");
 626			if (ret == -ENOSPC)
 627				new_flags |= HNAE3_OVERFLOW_MPE;
 628		}
 629	}
 630
  631	/* In user-requested promiscuous mode, vlan filtering is disabled to
  632	 * let all packets in. On MAC-VLAN table overflow, promiscuous mode is
  633	 * enabled but vlan filtering stays enabled.
  634	 */
 635	hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
 636	h->netdev_flags = new_flags;
 637	hns3_update_promisc_mode(netdev, new_flags);
 638}
 639
 640int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
 641{
 642	struct hns3_nic_priv *priv = netdev_priv(netdev);
 643	struct hnae3_handle *h = priv->ae_handle;
 644
 645	if (h->ae_algo->ops->set_promisc_mode) {
 646		return h->ae_algo->ops->set_promisc_mode(h,
 647						promisc_flags & HNAE3_UPE,
 648						promisc_flags & HNAE3_MPE);
 649	}
 650
 651	return 0;
 652}
 653
 654void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
 655{
 656	struct hns3_nic_priv *priv = netdev_priv(netdev);
 657	struct hnae3_handle *h = priv->ae_handle;
 658	bool last_state;
 659
 660	if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
 661		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
 662		if (enable != last_state) {
 663			netdev_info(netdev,
 664				    "%s vlan filter\n",
 665				    enable ? "enable" : "disable");
 666			h->ae_algo->ops->enable_vlan_filter(h, enable);
 667		}
 668	}
 669}
 670
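     /* Prepare a GSO skb for hardware TSO: clear the checksum fields the
      * hardware recomputes, remove the payload length from the TCP pseudo
      * checksum, and fill the TSO flag, paylen and MSS for the TX BD.
      */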
 671static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 672			u16 *mss, u32 *type_cs_vlan_tso)
 673{
 674	u32 l4_offset, hdr_len;
 675	union l3_hdr_info l3;
 676	union l4_hdr_info l4;
 677	u32 l4_paylen;
 678	int ret;
 679
 680	if (!skb_is_gso(skb))
 681		return 0;
 682
 683	ret = skb_cow_head(skb, 0);
 684	if (unlikely(ret))
 685		return ret;
 686
 687	l3.hdr = skb_network_header(skb);
 688	l4.hdr = skb_transport_header(skb);
 689
 690	/* Software should clear the IPv4's checksum field when tso is
 691	 * needed.
 692	 */
 693	if (l3.v4->version == 4)
 694		l3.v4->check = 0;
 695
 696	/* tunnel packet */
 697	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 698					 SKB_GSO_GRE_CSUM |
 699					 SKB_GSO_UDP_TUNNEL |
 700					 SKB_GSO_UDP_TUNNEL_CSUM)) {
 701		if ((!(skb_shinfo(skb)->gso_type &
 702		    SKB_GSO_PARTIAL)) &&
 703		    (skb_shinfo(skb)->gso_type &
 704		    SKB_GSO_UDP_TUNNEL_CSUM)) {
 705			/* Software should clear the udp's checksum
 706			 * field when tso is needed.
 707			 */
 708			l4.udp->check = 0;
 709		}
 710		/* reset l3&l4 pointers from outer to inner headers */
 711		l3.hdr = skb_inner_network_header(skb);
 712		l4.hdr = skb_inner_transport_header(skb);
 713
 714		/* Software should clear the IPv4's checksum field when
 715		 * tso is needed.
 716		 */
 717		if (l3.v4->version == 4)
 718			l3.v4->check = 0;
 719	}
 720
 721	/* normal or tunnel packet */
 722	l4_offset = l4.hdr - skb->data;
 723	hdr_len = (l4.tcp->doff << 2) + l4_offset;
 724
 725	/* remove payload length from inner pseudo checksum when tso */
 726	l4_paylen = skb->len - l4_offset;
 727	csum_replace_by_diff(&l4.tcp->check,
 728			     (__force __wsum)htonl(l4_paylen));
 729
 730	/* find the txbd field values */
 731	*paylen = skb->len - hdr_len;
 732	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
 733
 734	/* get MSS for TSO */
 735	*mss = skb_shinfo(skb)->gso_size;
 736
 737	return 0;
 738}
 739
 740static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
 741				u8 *il4_proto)
 742{
 743	union l3_hdr_info l3;
 744	unsigned char *l4_hdr;
 745	unsigned char *exthdr;
 746	u8 l4_proto_tmp;
 747	__be16 frag_off;
 748
 749	/* find outer header point */
 750	l3.hdr = skb_network_header(skb);
 751	l4_hdr = skb_transport_header(skb);
 752
 753	if (skb->protocol == htons(ETH_P_IPV6)) {
 754		exthdr = l3.hdr + sizeof(*l3.v6);
 755		l4_proto_tmp = l3.v6->nexthdr;
 756		if (l4_hdr != exthdr)
 757			ipv6_skip_exthdr(skb, exthdr - skb->data,
 758					 &l4_proto_tmp, &frag_off);
 759	} else if (skb->protocol == htons(ETH_P_IP)) {
 760		l4_proto_tmp = l3.v4->protocol;
 761	} else {
 762		return -EINVAL;
 763	}
 764
 765	*ol4_proto = l4_proto_tmp;
 766
 767	/* tunnel packet */
 768	if (!skb->encapsulation) {
 769		*il4_proto = 0;
 770		return 0;
 771	}
 772
 773	/* find inner header point */
 774	l3.hdr = skb_inner_network_header(skb);
 775	l4_hdr = skb_inner_transport_header(skb);
 776
 777	if (l3.v6->version == 6) {
 778		exthdr = l3.hdr + sizeof(*l3.v6);
 779		l4_proto_tmp = l3.v6->nexthdr;
 780		if (l4_hdr != exthdr)
 781			ipv6_skip_exthdr(skb, exthdr - skb->data,
 782					 &l4_proto_tmp, &frag_off);
 783	} else if (l3.v4->version == 4) {
 784		l4_proto_tmp = l3.v4->protocol;
 785	}
 786
 787	*il4_proto = l4_proto_tmp;
 788
 789	return 0;
 790}
 791
  792/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
  793 * and the packet is a udp packet whose destination port is the
  794 * IANA-assigned VXLAN port (4789), the hardware is expected to do the
  795 * checksum offload, but it does not do so for that port. Fall back to
  796 * software checksumming in that case.
  797 */
 798static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
 799{
 800	union l4_hdr_info l4;
 801
 802	l4.hdr = skb_transport_header(skb);
 803
 804	if (!(!skb->encapsulation &&
 805	      l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
 806		return false;
 807
 808	skb_checksum_help(skb);
 809
 810	return true;
 811}
 812
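     /* Fill the outer L2/L3/L4 header lengths, the outer L3 type and the
      * tunnel type fields of the TX BD for an encapsulated packet.
      */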
 813static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 814				  u32 *ol_type_vlan_len_msec)
 815{
 816	u32 l2_len, l3_len, l4_len;
 817	unsigned char *il2_hdr;
 818	union l3_hdr_info l3;
 819	union l4_hdr_info l4;
 820
 821	l3.hdr = skb_network_header(skb);
 822	l4.hdr = skb_transport_header(skb);
 823
 824	/* compute OL2 header size, defined in 2 Bytes */
 825	l2_len = l3.hdr - skb->data;
 826	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
 827
 828	/* compute OL3 header size, defined in 4 Bytes */
 829	l3_len = l4.hdr - l3.hdr;
 830	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
 831
 832	il2_hdr = skb_inner_mac_header(skb);
 833	/* compute OL4 header size, defined in 4 Bytes */
 834	l4_len = il2_hdr - l4.hdr;
 835	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
 836
 837	/* define outer network header type */
 838	if (skb->protocol == htons(ETH_P_IP)) {
 839		if (skb_is_gso(skb))
 840			hns3_set_field(*ol_type_vlan_len_msec,
 841				       HNS3_TXD_OL3T_S,
 842				       HNS3_OL3T_IPV4_CSUM);
 843		else
 844			hns3_set_field(*ol_type_vlan_len_msec,
 845				       HNS3_TXD_OL3T_S,
 846				       HNS3_OL3T_IPV4_NO_CSUM);
 847
 848	} else if (skb->protocol == htons(ETH_P_IPV6)) {
 849		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
 850			       HNS3_OL3T_IPV6);
 851	}
 852
 853	if (ol4_proto == IPPROTO_UDP)
 854		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
 855			       HNS3_TUN_MAC_IN_UDP);
 856	else if (ol4_proto == IPPROTO_GRE)
 857		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
 858			       HNS3_TUN_NVGRE);
 859}
 860
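     /* Fill the (inner) L2/L3/L4 lengths, L3/L4 types and checksum-offload
      * bits of the TX BD; for encapsulated skbs the outer fields are filled
      * first and the inner headers are used here.
      */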
 861static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 862			   u8 il4_proto, u32 *type_cs_vlan_tso,
 863			   u32 *ol_type_vlan_len_msec)
 864{
 865	unsigned char *l2_hdr = skb->data;
 866	u32 l4_proto = ol4_proto;
 867	union l4_hdr_info l4;
 868	union l3_hdr_info l3;
 869	u32 l2_len, l3_len;
 870
 871	l4.hdr = skb_transport_header(skb);
 872	l3.hdr = skb_network_header(skb);
 873
 874	/* handle encapsulation skb */
 875	if (skb->encapsulation) {
  876		/* If this is not a UDP/GRE encapsulated skb */
 877		if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
  878			/* drop the tunnel skb if the hardware doesn't support it,
  879			 * because hardware can't calculate the csum when doing TSO.
  880			 */
 881			if (skb_is_gso(skb))
 882				return -EDOM;
 883
  884			/* the stack computes the IP header already; the driver
  885			 * calculates the l4 checksum when not doing TSO.
  886			 */
 887			skb_checksum_help(skb);
 888			return 0;
 889		}
 890
 891		hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
 892
 893		/* switch to inner header */
 894		l2_hdr = skb_inner_mac_header(skb);
 895		l3.hdr = skb_inner_network_header(skb);
 896		l4.hdr = skb_inner_transport_header(skb);
 897		l4_proto = il4_proto;
 898	}
 899
 900	if (l3.v4->version == 4) {
 901		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
 902			       HNS3_L3T_IPV4);
 903
 904		/* the stack computes the IP header already, the only time we
 905		 * need the hardware to recompute it is in the case of TSO.
 906		 */
 907		if (skb_is_gso(skb))
 908			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
 909	} else if (l3.v6->version == 6) {
 910		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
 911			       HNS3_L3T_IPV6);
 912	}
 913
 914	/* compute inner(/normal) L2 header size, defined in 2 Bytes */
 915	l2_len = l3.hdr - l2_hdr;
 916	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
 917
 918	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
 919	l3_len = l4.hdr - l3.hdr;
 920	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
 921
 922	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
 923	switch (l4_proto) {
 924	case IPPROTO_TCP:
 925		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 926		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
 927			       HNS3_L4T_TCP);
 928		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
 929			       l4.tcp->doff);
 930		break;
 931	case IPPROTO_UDP:
 932		if (hns3_tunnel_csum_bug(skb))
 933			break;
 934
 935		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 936		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
 937			       HNS3_L4T_UDP);
 938		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
 939			       (sizeof(struct udphdr) >> 2));
 940		break;
 941	case IPPROTO_SCTP:
 942		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 943		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
 944			       HNS3_L4T_SCTP);
 945		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
 946			       (sizeof(struct sctphdr) >> 2));
 947		break;
 948	default:
  949		/* drop the skb if the hardware doesn't support its l4 protocol,
  950		 * because hardware can't calculate the csum when doing TSO.
  951		 */
 952		if (skb_is_gso(skb))
 953			return -EDOM;
 954
  955		/* the stack computes the IP header already; the driver
  956		 * calculates the l4 checksum when not doing TSO.
  957		 */
 958		skb_checksum_help(skb);
 959		return 0;
 960	}
 961
 962	return 0;
 963}
 964
 965static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 966{
 967	/* Config bd buffer end */
 968	if (!!frag_end)
 969		hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
 970	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
 971}
 972
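     /* Handle TX VLAN tagging. Returns HNS3_INNER_VLAN_TAG or
      * HNS3_OUTER_VLAN_TAG to tell the caller which BD field the tag goes in,
      * 0 when no HW tag insertion is needed, or a negative errno.
      */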
 973static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
 974			     struct sk_buff *skb)
 975{
 976	struct hnae3_handle *handle = tx_ring->tqp->handle;
 977	struct vlan_ethhdr *vhdr;
 978	int rc;
 979
 980	if (!(skb->protocol == htons(ETH_P_8021Q) ||
 981	      skb_vlan_tag_present(skb)))
 982		return 0;
 983
  984	/* Due to a HW limitation, when port-based VLAN insertion is enabled, only
  985	 * one VLAN header is allowed in the skb, otherwise it causes a RAS error.
  986	 */
 987	if (unlikely(skb_vlan_tagged_multi(skb) &&
 988		     handle->port_base_vlan_state ==
 989		     HNAE3_PORT_BASE_VLAN_ENABLE))
 990		return -EINVAL;
 991
 992	if (skb->protocol == htons(ETH_P_8021Q) &&
 993	    !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
 994		/* When HW VLAN acceleration is turned off, and the stack
  995		 * sets the protocol to 802.1q, the driver just needs to
 996		 * set the protocol to the encapsulated ethertype.
 997		 */
 998		skb->protocol = vlan_get_protocol(skb);
 999		return 0;
1000	}
1001
1002	if (skb_vlan_tag_present(skb)) {
1003		/* Based on hw strategy, use out_vtag in two layer tag case,
1004		 * and use inner_vtag in one tag case.
1005		 */
1006		if (skb->protocol == htons(ETH_P_8021Q) &&
1007		    handle->port_base_vlan_state ==
1008		    HNAE3_PORT_BASE_VLAN_DISABLE)
1009			rc = HNS3_OUTER_VLAN_TAG;
1010		else
1011			rc = HNS3_INNER_VLAN_TAG;
1012
1013		skb->protocol = vlan_get_protocol(skb);
1014		return rc;
1015	}
1016
1017	rc = skb_cow_head(skb, 0);
1018	if (unlikely(rc < 0))
1019		return rc;
1020
1021	vhdr = (struct vlan_ethhdr *)skb->data;
1022	vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
1023					 & VLAN_PRIO_MASK);
1024
1025	skb->protocol = vlan_get_protocol(skb);
1026	return 0;
1027}
1028
1029static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
1030			      struct sk_buff *skb, struct hns3_desc *desc)
1031{
1032	u32 ol_type_vlan_len_msec = 0;
1033	u32 type_cs_vlan_tso = 0;
1034	u32 paylen = skb->len;
1035	u16 inner_vtag = 0;
1036	u16 out_vtag = 0;
1037	u16 mss = 0;
1038	int ret;
1039
1040	ret = hns3_handle_vtags(ring, skb);
1041	if (unlikely(ret < 0)) {
1042		u64_stats_update_begin(&ring->syncp);
1043		ring->stats.tx_vlan_err++;
1044		u64_stats_update_end(&ring->syncp);
1045		return ret;
1046	} else if (ret == HNS3_INNER_VLAN_TAG) {
1047		inner_vtag = skb_vlan_tag_get(skb);
1048		inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1049				VLAN_PRIO_MASK;
1050		hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
1051	} else if (ret == HNS3_OUTER_VLAN_TAG) {
1052		out_vtag = skb_vlan_tag_get(skb);
1053		out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1054				VLAN_PRIO_MASK;
1055		hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1056			       1);
1057	}
1058
1059	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1060		u8 ol4_proto, il4_proto;
1061
1062		skb_reset_mac_len(skb);
1063
1064		ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1065		if (unlikely(ret)) {
1066			u64_stats_update_begin(&ring->syncp);
1067			ring->stats.tx_l4_proto_err++;
1068			u64_stats_update_end(&ring->syncp);
1069			return ret;
1070		}
1071
1072		ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1073				      &type_cs_vlan_tso,
1074				      &ol_type_vlan_len_msec);
1075		if (unlikely(ret)) {
1076			u64_stats_update_begin(&ring->syncp);
1077			ring->stats.tx_l2l3l4_err++;
1078			u64_stats_update_end(&ring->syncp);
1079			return ret;
1080		}
1081
1082		ret = hns3_set_tso(skb, &paylen, &mss,
1083				   &type_cs_vlan_tso);
1084		if (unlikely(ret)) {
1085			u64_stats_update_begin(&ring->syncp);
1086			ring->stats.tx_tso_err++;
1087			u64_stats_update_end(&ring->syncp);
1088			return ret;
1089		}
1090	}
1091
1092	/* Set txbd */
1093	desc->tx.ol_type_vlan_len_msec =
1094		cpu_to_le32(ol_type_vlan_len_msec);
1095	desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
1096	desc->tx.paylen = cpu_to_le32(paylen);
1097	desc->tx.mss = cpu_to_le16(mss);
1098	desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1099	desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1100
1101	return 0;
1102}
1103
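     /* Map one buffer (linear skb data or a page frag) for DMA and fill TX
      * BDs for it, splitting buffers larger than HNS3_MAX_BD_SIZE across
      * multiple BDs.
      */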
1104static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1105			  unsigned int size, int frag_end,
1106			  enum hns_desc_type type)
1107{
1108	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1109	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1110	struct device *dev = ring_to_dev(ring);
1111	skb_frag_t *frag;
1112	unsigned int frag_buf_num;
1113	int k, sizeoflast;
1114	dma_addr_t dma;
1115
1116	if (type == DESC_TYPE_SKB) {
1117		struct sk_buff *skb = (struct sk_buff *)priv;
1118		int ret;
1119
1120		ret = hns3_fill_skb_desc(ring, skb, desc);
1121		if (unlikely(ret))
1122			return ret;
1123
1124		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1125	} else {
1126		frag = (skb_frag_t *)priv;
1127		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1128	}
1129
1130	if (unlikely(dma_mapping_error(dev, dma))) {
1131		u64_stats_update_begin(&ring->syncp);
1132		ring->stats.sw_err_cnt++;
1133		u64_stats_update_end(&ring->syncp);
1134		return -ENOMEM;
1135	}
1136
1137	desc_cb->length = size;
1138
1139	if (likely(size <= HNS3_MAX_BD_SIZE)) {
1140		u16 bdtp_fe_sc_vld_ra_ri = 0;
1141
1142		desc_cb->priv = priv;
1143		desc_cb->dma = dma;
1144		desc_cb->type = type;
1145		desc->addr = cpu_to_le64(dma);
1146		desc->tx.send_size = cpu_to_le16(size);
1147		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
1148		desc->tx.bdtp_fe_sc_vld_ra_ri =
1149			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1150
1151		ring_ptr_move_fw(ring, next_to_use);
1152		return 0;
1153	}
1154
1155	frag_buf_num = hns3_tx_bd_count(size);
1156	sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1157	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1158
1159	/* When frag size is bigger than hardware limit, split this frag */
1160	for (k = 0; k < frag_buf_num; k++) {
1161		u16 bdtp_fe_sc_vld_ra_ri = 0;
1162
1163		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1164		desc_cb->priv = priv;
1165		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1166		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1167				DESC_TYPE_SKB : DESC_TYPE_PAGE;
1168
1169		/* now, fill the descriptor */
1170		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1171		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1172				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1173		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1174				       frag_end && (k == frag_buf_num - 1) ?
1175						1 : 0);
1176		desc->tx.bdtp_fe_sc_vld_ra_ri =
1177				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1178
1179		/* move ring pointer to next */
1180		ring_ptr_move_fw(ring, next_to_use);
1181
1182		desc_cb = &ring->desc_cb[ring->next_to_use];
1183		desc = &ring->desc[ring->next_to_use];
1184	}
1185
1186	return 0;
1187}
1188
1189static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
1190{
1191	unsigned int bd_num;
1192	int i;
1193
1194	/* if the total len is within the max bd limit */
1195	if (likely(skb->len <= HNS3_MAX_BD_SIZE))
1196		return skb_shinfo(skb)->nr_frags + 1;
1197
1198	bd_num = hns3_tx_bd_count(skb_headlen(skb));
1199
1200	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1201		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1202		bd_num += hns3_tx_bd_count(skb_frag_size(frag));
1203	}
1204
1205	return bd_num;
1206}
1207
1208static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1209{
1210	if (!skb->encapsulation)
1211		return skb_transport_offset(skb) + tcp_hdrlen(skb);
1212
1213	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
1214}
1215
 1216/* HW needs every continuous run of 8 buffers to hold more data than the MSS.
 1217 * We simplify this by ensuring that skb_headlen plus the first continuous
 1218 * 7 frags is larger than the gso header len + mss, and that every remaining
 1219 * continuous run of 7 frags is larger than MSS, except for the last 7 frags.
 1220 */
1221static bool hns3_skb_need_linearized(struct sk_buff *skb)
1222{
1223	int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
1224	unsigned int tot_len = 0;
1225	int i;
1226
1227	for (i = 0; i < bd_limit; i++)
1228		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1229
 1230	/* ensure that headlen + the first 7 frags is greater than mss + header
 1231	 * len, and that the first 7 frags alone are greater than mss.
 1232	 */
1233	if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
1234	    hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
1235		return true;
1236
 1237	/* ensure each remaining continuous run of 7 frags is greater than mss */
1238	for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
1239		tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
1240		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
1241
1242		if (tot_len < skb_shinfo(skb)->gso_size)
1243			return true;
1244	}
1245
1246	return false;
1247}
1248
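     /* Return the number of BDs the skb needs; copy the skb into a new linear
      * one when it exceeds the hardware BD limits, and return -EBUSY when the
      * ring has no room or -ENOMEM when the copy fails.
      */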
1249static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1250				  struct sk_buff **out_skb)
1251{
1252	struct sk_buff *skb = *out_skb;
1253	unsigned int bd_num;
1254
1255	bd_num = hns3_nic_bd_num(skb);
1256	if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
1257		struct sk_buff *new_skb;
1258
1259		if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
1260		    !hns3_skb_need_linearized(skb))
1261			goto out;
1262
1263		/* manual split the send packet */
1264		new_skb = skb_copy(skb, GFP_ATOMIC);
1265		if (!new_skb)
1266			return -ENOMEM;
1267		dev_kfree_skb_any(skb);
1268		*out_skb = new_skb;
1269
1270		bd_num = hns3_nic_bd_num(new_skb);
1271		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
1272		    (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
1273			return -ENOMEM;
1274
1275		u64_stats_update_begin(&ring->syncp);
1276		ring->stats.tx_copy++;
1277		u64_stats_update_end(&ring->syncp);
1278	}
1279
1280out:
1281	if (unlikely(ring_space(ring) < bd_num))
1282		return -EBUSY;
1283
1284	return bd_num;
1285}
1286
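     /* Error unwind for hns3_nic_net_xmit(): walk next_to_use back to where
      * this packet started and unmap every descriptor that was filled.
      */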
1287static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1288{
1289	struct device *dev = ring_to_dev(ring);
1290	unsigned int i;
1291
1292	for (i = 0; i < ring->desc_num; i++) {
1293		/* check if this is where we started */
1294		if (ring->next_to_use == next_to_use_orig)
1295			break;
1296
1297		/* rollback one */
1298		ring_ptr_move_bw(ring, next_to_use);
1299
1300		/* unmap the descriptor dma address */
1301		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1302			dma_unmap_single(dev,
1303					 ring->desc_cb[ring->next_to_use].dma,
1304					ring->desc_cb[ring->next_to_use].length,
1305					DMA_TO_DEVICE);
1306		else if (ring->desc_cb[ring->next_to_use].length)
1307			dma_unmap_page(dev,
1308				       ring->desc_cb[ring->next_to_use].dma,
1309				       ring->desc_cb[ring->next_to_use].length,
1310				       DMA_TO_DEVICE);
1311
1312		ring->desc_cb[ring->next_to_use].length = 0;
1313		ring->desc_cb[ring->next_to_use].dma = 0;
1314	}
1315}
1316
1317netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1318{
1319	struct hns3_nic_priv *priv = netdev_priv(netdev);
1320	struct hns3_nic_ring_data *ring_data =
1321		&tx_ring_data(priv, skb->queue_mapping);
1322	struct hns3_enet_ring *ring = ring_data->ring;
1323	struct netdev_queue *dev_queue;
1324	skb_frag_t *frag;
1325	int next_to_use_head;
1326	int buf_num;
1327	int seg_num;
1328	int size;
1329	int ret;
1330	int i;
1331
1332	/* Prefetch the data used later */
1333	prefetch(skb->data);
1334
1335	buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
1336	if (unlikely(buf_num <= 0)) {
1337		if (buf_num == -EBUSY) {
1338			u64_stats_update_begin(&ring->syncp);
1339			ring->stats.tx_busy++;
1340			u64_stats_update_end(&ring->syncp);
1341			goto out_net_tx_busy;
1342		} else if (buf_num == -ENOMEM) {
1343			u64_stats_update_begin(&ring->syncp);
1344			ring->stats.sw_err_cnt++;
1345			u64_stats_update_end(&ring->syncp);
1346		}
1347
1348		hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
1349		goto out_err_tx_ok;
1350	}
1351
1352	/* No. of segments (plus a header) */
1353	seg_num = skb_shinfo(skb)->nr_frags + 1;
1354	/* Fill the first part */
1355	size = skb_headlen(skb);
1356
1357	next_to_use_head = ring->next_to_use;
1358
1359	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1360			     DESC_TYPE_SKB);
1361	if (unlikely(ret))
1362		goto fill_err;
1363
1364	/* Fill the fragments */
1365	for (i = 1; i < seg_num; i++) {
1366		frag = &skb_shinfo(skb)->frags[i - 1];
1367		size = skb_frag_size(frag);
1368
1369		ret = hns3_fill_desc(ring, frag, size,
1370				     seg_num - 1 == i ? 1 : 0,
1371				     DESC_TYPE_PAGE);
1372
1373		if (unlikely(ret))
1374			goto fill_err;
1375	}
1376
1377	/* Complete translate all packets */
1378	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1379	netdev_tx_sent_queue(dev_queue, skb->len);
1380
1381	wmb(); /* Commit all data before submit */
1382
1383	hnae3_queue_xmit(ring->tqp, buf_num);
1384
1385	return NETDEV_TX_OK;
1386
1387fill_err:
1388	hns3_clear_desc(ring, next_to_use_head);
1389
1390out_err_tx_ok:
1391	dev_kfree_skb_any(skb);
1392	return NETDEV_TX_OK;
1393
1394out_net_tx_busy:
1395	netif_stop_subqueue(netdev, ring_data->queue_index);
1396	smp_mb(); /* Commit all data before submit */
1397
1398	return NETDEV_TX_BUSY;
1399}
1400
1401static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1402{
1403	struct hnae3_handle *h = hns3_get_handle(netdev);
1404	struct sockaddr *mac_addr = p;
1405	int ret;
1406
1407	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1408		return -EADDRNOTAVAIL;
1409
1410	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1411		netdev_info(netdev, "already using mac address %pM\n",
1412			    mac_addr->sa_data);
1413		return 0;
1414	}
1415
1416	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1417	if (ret) {
1418		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1419		return ret;
1420	}
1421
1422	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1423
1424	return 0;
1425}
1426
1427static int hns3_nic_do_ioctl(struct net_device *netdev,
1428			     struct ifreq *ifr, int cmd)
1429{
1430	struct hnae3_handle *h = hns3_get_handle(netdev);
1431
1432	if (!netif_running(netdev))
1433		return -EINVAL;
1434
1435	if (!h->ae_algo->ops->do_ioctl)
1436		return -EOPNOTSUPP;
1437
1438	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1439}
1440
1441static int hns3_nic_set_features(struct net_device *netdev,
1442				 netdev_features_t features)
1443{
1444	netdev_features_t changed = netdev->features ^ features;
1445	struct hns3_nic_priv *priv = netdev_priv(netdev);
1446	struct hnae3_handle *h = priv->ae_handle;
1447	bool enable;
1448	int ret;
1449
1450	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
1451		enable = !!(features & NETIF_F_GRO_HW);
1452		ret = h->ae_algo->ops->set_gro_en(h, enable);
1453		if (ret)
1454			return ret;
1455	}
1456
1457	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1458	    h->ae_algo->ops->enable_vlan_filter) {
1459		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
1460		h->ae_algo->ops->enable_vlan_filter(h, enable);
1461	}
1462
1463	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1464	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
1465		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1466		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
1467		if (ret)
1468			return ret;
1469	}
1470
1471	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1472		enable = !!(features & NETIF_F_NTUPLE);
1473		h->ae_algo->ops->enable_fd(h, enable);
1474	}
1475
1476	netdev->features = features;
1477	return 0;
1478}
1479
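     /* ndo_get_stats64 callback: aggregate the per-ring SW counters (under
      * u64_stats sync) together with the HW counters refreshed via
      * update_stats().
      */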
1480static void hns3_nic_get_stats64(struct net_device *netdev,
1481				 struct rtnl_link_stats64 *stats)
1482{
1483	struct hns3_nic_priv *priv = netdev_priv(netdev);
1484	int queue_num = priv->ae_handle->kinfo.num_tqps;
1485	struct hnae3_handle *handle = priv->ae_handle;
1486	struct hns3_enet_ring *ring;
1487	u64 rx_length_errors = 0;
1488	u64 rx_crc_errors = 0;
1489	u64 rx_multicast = 0;
1490	unsigned int start;
1491	u64 tx_errors = 0;
1492	u64 rx_errors = 0;
1493	unsigned int idx;
1494	u64 tx_bytes = 0;
1495	u64 rx_bytes = 0;
1496	u64 tx_pkts = 0;
1497	u64 rx_pkts = 0;
1498	u64 tx_drop = 0;
1499	u64 rx_drop = 0;
1500
1501	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1502		return;
1503
1504	handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1505
1506	for (idx = 0; idx < queue_num; idx++) {
1507		/* fetch the tx stats */
1508		ring = priv->ring_data[idx].ring;
1509		do {
1510			start = u64_stats_fetch_begin_irq(&ring->syncp);
1511			tx_bytes += ring->stats.tx_bytes;
1512			tx_pkts += ring->stats.tx_pkts;
1513			tx_drop += ring->stats.sw_err_cnt;
1514			tx_drop += ring->stats.tx_vlan_err;
1515			tx_drop += ring->stats.tx_l4_proto_err;
1516			tx_drop += ring->stats.tx_l2l3l4_err;
1517			tx_drop += ring->stats.tx_tso_err;
1518			tx_errors += ring->stats.sw_err_cnt;
1519			tx_errors += ring->stats.tx_vlan_err;
1520			tx_errors += ring->stats.tx_l4_proto_err;
1521			tx_errors += ring->stats.tx_l2l3l4_err;
1522			tx_errors += ring->stats.tx_tso_err;
1523		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1524
1525		/* fetch the rx stats */
1526		ring = priv->ring_data[idx + queue_num].ring;
1527		do {
1528			start = u64_stats_fetch_begin_irq(&ring->syncp);
1529			rx_bytes += ring->stats.rx_bytes;
1530			rx_pkts += ring->stats.rx_pkts;
1531			rx_drop += ring->stats.l2_err;
1532			rx_errors += ring->stats.l2_err;
1533			rx_errors += ring->stats.l3l4_csum_err;
1534			rx_crc_errors += ring->stats.l2_err;
1535			rx_multicast += ring->stats.rx_multicast;
1536			rx_length_errors += ring->stats.err_pkt_len;
1537		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1538	}
1539
1540	stats->tx_bytes = tx_bytes;
1541	stats->tx_packets = tx_pkts;
1542	stats->rx_bytes = rx_bytes;
1543	stats->rx_packets = rx_pkts;
1544
1545	stats->rx_errors = rx_errors;
1546	stats->multicast = rx_multicast;
1547	stats->rx_length_errors = rx_length_errors;
1548	stats->rx_crc_errors = rx_crc_errors;
1549	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1550
1551	stats->tx_errors = tx_errors;
1552	stats->rx_dropped = rx_drop;
1553	stats->tx_dropped = tx_drop;
1554	stats->collisions = netdev->stats.collisions;
1555	stats->rx_over_errors = netdev->stats.rx_over_errors;
1556	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1557	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1558	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1559	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1560	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1561	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1562	stats->tx_window_errors = netdev->stats.tx_window_errors;
1563	stats->rx_compressed = netdev->stats.rx_compressed;
1564	stats->tx_compressed = netdev->stats.tx_compressed;
1565}
1566
1567static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1568{
1569	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1570	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1571	struct hnae3_knic_private_info *kinfo;
1572	u8 tc = mqprio_qopt->qopt.num_tc;
1573	u16 mode = mqprio_qopt->mode;
1574	u8 hw = mqprio_qopt->qopt.hw;
1575	struct hnae3_handle *h;
1576
1577	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1578	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1579		return -EOPNOTSUPP;
1580
1581	if (tc > HNAE3_MAX_TC)
1582		return -EINVAL;
1583
1584	if (!netdev)
1585		return -EINVAL;
1586
1587	h = hns3_get_handle(netdev);
1588	kinfo = &h->kinfo;
1589
1590	netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
1591
1592	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1593		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1594}
1595
1596static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1597			     void *type_data)
1598{
1599	if (type != TC_SETUP_QDISC_MQPRIO)
1600		return -EOPNOTSUPP;
1601
1602	return hns3_setup_tc(dev, type_data);
1603}
1604
1605static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1606				__be16 proto, u16 vid)
1607{
1608	struct hnae3_handle *h = hns3_get_handle(netdev);
1609	int ret = -EIO;
1610
1611	if (h->ae_algo->ops->set_vlan_filter)
1612		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1613
1614	return ret;
1615}
1616
1617static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1618				 __be16 proto, u16 vid)
1619{
1620	struct hnae3_handle *h = hns3_get_handle(netdev);
1621	int ret = -EIO;
1622
1623	if (h->ae_algo->ops->set_vlan_filter)
1624		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1625
1626	return ret;
1627}
1628
1629static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1630				u8 qos, __be16 vlan_proto)
1631{
1632	struct hnae3_handle *h = hns3_get_handle(netdev);
1633	int ret = -EIO;
1634
1635	netif_dbg(h, drv, netdev,
1636		  "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
1637		  vf, vlan, qos, vlan_proto);
1638
1639	if (h->ae_algo->ops->set_vf_vlan_filter)
1640		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1641							  qos, vlan_proto);
1642
1643	return ret;
1644}
1645
1646static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1647{
1648	struct hnae3_handle *h = hns3_get_handle(netdev);
1649	int ret;
1650
1651	if (hns3_nic_resetting(netdev))
1652		return -EBUSY;
1653
1654	if (!h->ae_algo->ops->set_mtu)
1655		return -EOPNOTSUPP;
1656
1657	netif_dbg(h, drv, netdev,
1658		  "change mtu from %u to %d\n", netdev->mtu, new_mtu);
1659
1660	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1661	if (ret)
1662		netdev_err(netdev, "failed to change MTU in hardware %d\n",
1663			   ret);
1664	else
1665		netdev->mtu = new_mtu;
1666
1667	return ret;
1668}
1669
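     /* On a TX timeout, locate the stalled queue and dump its SW and HW ring
      * state (head/tail pointers, BD counters, pause frame statistics) to aid
      * debugging. Returns false if no stalled queue was found.
      */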
1670static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1671{
1672	struct hns3_nic_priv *priv = netdev_priv(ndev);
1673	struct hnae3_handle *h = hns3_get_handle(ndev);
1674	struct hns3_enet_ring *tx_ring = NULL;
1675	struct napi_struct *napi;
1676	int timeout_queue = 0;
1677	int hw_head, hw_tail;
1678	int fbd_num, fbd_oft;
1679	int ebd_num, ebd_oft;
1680	int bd_num, bd_err;
1681	int ring_en, tc;
1682	int i;
1683
1684	/* Find the stopped queue the same way the stack does */
1685	for (i = 0; i < ndev->num_tx_queues; i++) {
1686		struct netdev_queue *q;
1687		unsigned long trans_start;
1688
1689		q = netdev_get_tx_queue(ndev, i);
1690		trans_start = q->trans_start;
1691		if (netif_xmit_stopped(q) &&
1692		    time_after(jiffies,
1693			       (trans_start + ndev->watchdog_timeo))) {
1694			timeout_queue = i;
1695			break;
1696		}
1697	}
1698
1699	if (i == ndev->num_tx_queues) {
1700		netdev_info(ndev,
1701			    "no netdev TX timeout queue found, timeout count: %llu\n",
1702			    priv->tx_timeout_count);
1703		return false;
1704	}
1705
1706	priv->tx_timeout_count++;
1707
1708	tx_ring = priv->ring_data[timeout_queue].ring;
1709	napi = &tx_ring->tqp_vector->napi;
1710
1711	netdev_info(ndev,
1712		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
1713		    priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
1714		    tx_ring->next_to_clean, napi->state);
1715
1716	netdev_info(ndev,
1717		    "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
1718		    tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
1719		    tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
1720
1721	netdev_info(ndev,
1722		    "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
1723		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
1724		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
1725
 1726	/* When the MAC receives many pause frames continuously, it is unable to
 1727	 * send packets, which may cause a tx timeout
 1728	 */
1729	if (h->ae_algo->ops->get_mac_stats) {
1730		struct hns3_mac_stats mac_stats;
1731
1732		h->ae_algo->ops->get_mac_stats(h, &mac_stats);
1733		netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
1734			    mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
1735	}
1736
1737	hw_head = readl_relaxed(tx_ring->tqp->io_base +
1738				HNS3_RING_TX_RING_HEAD_REG);
1739	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1740				HNS3_RING_TX_RING_TAIL_REG);
1741	fbd_num = readl_relaxed(tx_ring->tqp->io_base +
1742				HNS3_RING_TX_RING_FBDNUM_REG);
1743	fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
1744				HNS3_RING_TX_RING_OFFSET_REG);
1745	ebd_num = readl_relaxed(tx_ring->tqp->io_base +
1746				HNS3_RING_TX_RING_EBDNUM_REG);
1747	ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
1748				HNS3_RING_TX_RING_EBD_OFFSET_REG);
1749	bd_num = readl_relaxed(tx_ring->tqp->io_base +
1750			       HNS3_RING_TX_RING_BD_NUM_REG);
1751	bd_err = readl_relaxed(tx_ring->tqp->io_base +
1752			       HNS3_RING_TX_RING_BD_ERR_REG);
1753	ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
1754	tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
1755
1756	netdev_info(ndev,
1757		    "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
1758		    bd_num, hw_head, hw_tail, bd_err,
1759		    readl(tx_ring->tqp_vector->mask_addr));
1760	netdev_info(ndev,
1761		    "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
1762		    ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
1763
1764	return true;
1765}
1766
1767static void hns3_nic_net_timeout(struct net_device *ndev)
1768{
1769	struct hns3_nic_priv *priv = netdev_priv(ndev);
1770	struct hnae3_handle *h = priv->ae_handle;
1771
1772	if (!hns3_get_tx_timeo_queue_info(ndev))
1773		return;
1774
 1775	/* request the reset, and let hclge determine
1776	 * which reset level should be done
1777	 */
1778	if (h->ae_algo->ops->reset_event)
1779		h->ae_algo->ops->reset_event(h->pdev, h);
1780}
1781
1782#ifdef CONFIG_RFS_ACCEL
1783static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1784			      u16 rxq_index, u32 flow_id)
1785{
1786	struct hnae3_handle *h = hns3_get_handle(dev);
1787	struct flow_keys fkeys;
1788
1789	if (!h->ae_algo->ops->add_arfs_entry)
1790		return -EOPNOTSUPP;
1791
1792	if (skb->encapsulation)
1793		return -EPROTONOSUPPORT;
1794
1795	if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
1796		return -EPROTONOSUPPORT;
1797
1798	if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
1799	     fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
1800	    (fkeys.basic.ip_proto != IPPROTO_TCP &&
1801	     fkeys.basic.ip_proto != IPPROTO_UDP))
1802		return -EPROTONOSUPPORT;
1803
1804	return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
1805}
1806#endif
1807
1808static const struct net_device_ops hns3_nic_netdev_ops = {
1809	.ndo_open		= hns3_nic_net_open,
1810	.ndo_stop		= hns3_nic_net_stop,
1811	.ndo_start_xmit		= hns3_nic_net_xmit,
1812	.ndo_tx_timeout		= hns3_nic_net_timeout,
1813	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
1814	.ndo_do_ioctl		= hns3_nic_do_ioctl,
1815	.ndo_change_mtu		= hns3_nic_change_mtu,
1816	.ndo_set_features	= hns3_nic_set_features,
1817	.ndo_get_stats64	= hns3_nic_get_stats64,
1818	.ndo_setup_tc		= hns3_nic_setup_tc,
1819	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
1820	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
1821	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
1822	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
1823#ifdef CONFIG_RFS_ACCEL
1824	.ndo_rx_flow_steer	= hns3_rx_flow_steer,
1825#endif
1826
1827};
1828
1829bool hns3_is_phys_func(struct pci_dev *pdev)
1830{
1831	u32 dev_id = pdev->device;
1832
1833	switch (dev_id) {
1834	case HNAE3_DEV_ID_GE:
1835	case HNAE3_DEV_ID_25GE:
1836	case HNAE3_DEV_ID_25GE_RDMA:
1837	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1838	case HNAE3_DEV_ID_50GE_RDMA:
1839	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1840	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1841		return true;
1842	case HNAE3_DEV_ID_100G_VF:
1843	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1844		return false;
1845	default:
1846		dev_warn(&pdev->dev, "unrecognized pci device-id %d",
1847			 dev_id);
1848	}
1849
1850	return false;
1851}
1852
1853static void hns3_disable_sriov(struct pci_dev *pdev)
1854{
1855	/* If our VFs are assigned, we cannot shut down SR-IOV
1856	 * without causing issues, so just leave the hardware
1857	 * available but disabled
1858	 */
1859	if (pci_vfs_assigned(pdev)) {
1860		dev_warn(&pdev->dev,
1861			 "disabling driver while VFs are assigned\n");
1862		return;
1863	}
1864
1865	pci_disable_sriov(pdev);
1866}
1867
1868static void hns3_get_dev_capability(struct pci_dev *pdev,
1869				    struct hnae3_ae_dev *ae_dev)
1870{
1871	if (pdev->revision >= 0x21) {
1872		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1873		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
1874	}
1875}
1876
1877/* hns3_probe - Device initialization routine
1878 * @pdev: PCI device information struct
1879 * @ent: entry in hns3_pci_tbl
1880 *
1881 * hns3_probe initializes a PF identified by a pci_dev structure.
1882 * The OS initialization, configuring of the PF private structure,
1883 * and a hardware reset occur.
1884 *
1885 * Returns 0 on success, negative on failure
1886 */
1887static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1888{
1889	struct hnae3_ae_dev *ae_dev;
1890	int ret;
1891
1892	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
1893	if (!ae_dev)
1894		return -ENOMEM;
1897
1898	ae_dev->pdev = pdev;
1899	ae_dev->flag = ent->driver_data;
1900	ae_dev->reset_type = HNAE3_NONE_RESET;
1901	hns3_get_dev_capability(pdev, ae_dev);
1902	pci_set_drvdata(pdev, ae_dev);
1903
1904	ret = hnae3_register_ae_dev(ae_dev);
1905	if (ret) {
1906		devm_kfree(&pdev->dev, ae_dev);
1907		pci_set_drvdata(pdev, NULL);
1908	}
1909
1910	return ret;
1911}
1912
1913/* hns3_remove - Device removal routine
1914 * @pdev: PCI device information struct
1915 */
1916static void hns3_remove(struct pci_dev *pdev)
1917{
1918	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1919
1920	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1921		hns3_disable_sriov(pdev);
1922
1923	hnae3_unregister_ae_dev(ae_dev);
1924	pci_set_drvdata(pdev, NULL);
1925}
1926
1927/**
1928 * hns3_pci_sriov_configure
1929 * @pdev: pointer to a pci_dev structure
1930 * @num_vfs: number of VFs to allocate
1931 *
1932 * Enable or change the number of VFs. Called when the user updates the number
1933 * of VFs in sysfs.
1934 **/
1935static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1936{
1937	int ret;
1938
1939	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1940		dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
1941		return -EINVAL;
1942	}
1943
1944	if (num_vfs) {
1945		ret = pci_enable_sriov(pdev, num_vfs);
1946		if (ret)
1947			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1948		else
1949			return num_vfs;
1950	} else if (!pci_vfs_assigned(pdev)) {
1951		pci_disable_sriov(pdev);
1952	} else {
1953		dev_warn(&pdev->dev,
1954			 "Unable to free VFs because some are assigned to VMs.\n");
1955	}
1956
1957	return 0;
1958}
1959
1960static void hns3_shutdown(struct pci_dev *pdev)
1961{
1962	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1963
1964	hnae3_unregister_ae_dev(ae_dev);
1965	devm_kfree(&pdev->dev, ae_dev);
1966	pci_set_drvdata(pdev, NULL);
1967
1968	if (system_state == SYSTEM_POWER_OFF)
1969		pci_set_power_state(pdev, PCI_D3hot);
1970}
1971
1972static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1973					    pci_channel_state_t state)
1974{
1975	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1976	pci_ers_result_t ret;
1977
1978	dev_info(&pdev->dev, "PCI error detected, state = %d\n", state);
1979
1980	if (state == pci_channel_io_perm_failure)
1981		return PCI_ERS_RESULT_DISCONNECT;
1982
1983	if (!ae_dev || !ae_dev->ops) {
1984		dev_err(&pdev->dev,
1985			"Can't recover - error happened before device initialized\n");
1986		return PCI_ERS_RESULT_NONE;
1987	}
1988
1989	if (ae_dev->ops->handle_hw_ras_error)
1990		ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
1991	else
1992		return PCI_ERS_RESULT_NONE;
1993
1994	return ret;
1995}
1996
1997static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1998{
1999	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2000	const struct hnae3_ae_ops *ops;
2001	enum hnae3_reset_type reset_type;
2002	struct device *dev = &pdev->dev;
2003
2004	if (!ae_dev || !ae_dev->ops)
2005		return PCI_ERS_RESULT_NONE;
2006
2007	ops = ae_dev->ops;
2008	/* request the reset */
2009	if (ops->reset_event && ops->get_reset_level &&
2010	    ops->set_default_reset_request) {
2011		if (ae_dev->hw_err_reset_req) {
2012			reset_type = ops->get_reset_level(ae_dev,
2013						&ae_dev->hw_err_reset_req);
2014			ops->set_default_reset_request(ae_dev, reset_type);
2015			dev_info(dev, "requesting reset due to PCI error\n");
2016			ops->reset_event(pdev, NULL);
2017		}
2018
2019		return PCI_ERS_RESULT_RECOVERED;
2020	}
2021
2022	return PCI_ERS_RESULT_DISCONNECT;
2023}
2024
2025static void hns3_reset_prepare(struct pci_dev *pdev)
2026{
2027	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2028
2029	dev_info(&pdev->dev, "hns3 flr prepare\n");
2030	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
2031		ae_dev->ops->flr_prepare(ae_dev);
2032}
2033
2034static void hns3_reset_done(struct pci_dev *pdev)
2035{
2036	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2037
2038	dev_info(&pdev->dev, "hns3 flr done\n");
2039	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
2040		ae_dev->ops->flr_done(ae_dev);
2041}
2042
2043static const struct pci_error_handlers hns3_err_handler = {
2044	.error_detected = hns3_error_detected,
2045	.slot_reset     = hns3_slot_reset,
2046	.reset_prepare	= hns3_reset_prepare,
2047	.reset_done	= hns3_reset_done,
2048};
2049
2050static struct pci_driver hns3_driver = {
2051	.name     = hns3_driver_name,
2052	.id_table = hns3_pci_tbl,
2053	.probe    = hns3_probe,
2054	.remove   = hns3_remove,
2055	.shutdown = hns3_shutdown,
2056	.sriov_configure = hns3_pci_sriov_configure,
2057	.err_handler    = &hns3_err_handler,
2058};
2059
2060	/* set the default features for the hns3 netdev */
2061static void hns3_set_default_feature(struct net_device *netdev)
2062{
2063	struct hnae3_handle *h = hns3_get_handle(netdev);
2064	struct pci_dev *pdev = h->pdev;
2065
2066	netdev->priv_flags |= IFF_UNICAST_FLT;
2067
2068	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2069		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2070		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2071		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2072		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2073
2074	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2075
2076	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2077
2078	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2079		NETIF_F_HW_VLAN_CTAG_FILTER |
2080		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2081		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2082		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2083		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2084		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2085
2086	netdev->vlan_features |=
2087		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2088		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
2089		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2090		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2091		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2092
2093	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2094		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2095		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2096		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2097		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2098		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2099
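	/* HW revision 0x21 and later adds hardware GRO, plus flow director
	 * (ntuple) filtering on non-VF handles
	 */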
2100	if (pdev->revision >= 0x21) {
2101		netdev->hw_features |= NETIF_F_GRO_HW;
2102		netdev->features |= NETIF_F_GRO_HW;
2103
2104		if (!(h->flags & HNAE3_SUPPORT_VF)) {
2105			netdev->hw_features |= NETIF_F_NTUPLE;
2106			netdev->features |= NETIF_F_NTUPLE;
2107		}
2108	}
2109}
2110
2111static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
2112			     struct hns3_desc_cb *cb)
2113{
2114	unsigned int order = hns3_page_order(ring);
2115	struct page *p;
2116
2117	p = dev_alloc_pages(order);
2118	if (!p)
2119		return -ENOMEM;
2120
2121	cb->priv = p;
2122	cb->page_offset = 0;
2123	cb->reuse_flag = 0;
2124	cb->buf  = page_address(p);
2125	cb->length = hns3_page_size(ring);
2126	cb->type = DESC_TYPE_PAGE;
2127
2128	return 0;
2129}
2130
2131static void hns3_free_buffer(struct hns3_enet_ring *ring,
2132			     struct hns3_desc_cb *cb)
2133{
2134	if (cb->type == DESC_TYPE_SKB)
2135		dev_kfree_skb_any((struct sk_buff *)cb->priv);
2136	else if (!HNAE3_IS_TX_RING(ring))
2137		put_page((struct page *)cb->priv);
2138	memset(cb, 0, sizeof(*cb));
2139}
2140
2141static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
2142{
2143	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
2144			       cb->length, ring_to_dma_dir(ring));
2145
2146	if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
2147		return -EIO;
2148
2149	return 0;
2150}
2151
2152static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
2153			      struct hns3_desc_cb *cb)
2154{
2155	if (cb->type == DESC_TYPE_SKB)
2156		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
2157				 ring_to_dma_dir(ring));
2158	else if (cb->length)
2159		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
2160			       ring_to_dma_dir(ring));
2161}
2162
2163static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2164{
2165	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2166	ring->desc[i].addr = 0;
2167}
2168
2169static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
2170{
2171	struct hns3_desc_cb *cb = &ring->desc_cb[i];
2172
2173	if (!ring->desc_cb[i].dma)
2174		return;
2175
2176	hns3_buffer_detach(ring, i);
2177	hns3_free_buffer(ring, cb);
2178}
2179
2180static void hns3_free_buffers(struct hns3_enet_ring *ring)
2181{
2182	int i;
2183
2184	for (i = 0; i < ring->desc_num; i++)
2185		hns3_free_buffer_detach(ring, i);
2186}
2187
2188/* free desc along with its attached buffer */
2189static void hns3_free_desc(struct hns3_enet_ring *ring)
2190{
2191	int size = ring->desc_num * sizeof(ring->desc[0]);
2192
2193	hns3_free_buffers(ring);
2194
2195	if (ring->desc) {
2196		dma_free_coherent(ring_to_dev(ring), size,
2197				  ring->desc, ring->desc_dma_addr);
2198		ring->desc = NULL;
2199	}
2200}
2201
2202static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2203{
2204	int size = ring->desc_num * sizeof(ring->desc[0]);
2205
2206	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2207					&ring->desc_dma_addr, GFP_KERNEL);
2208	if (!ring->desc)
2209		return -ENOMEM;
2210
2211	return 0;
2212}
2213
2214static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
2215				   struct hns3_desc_cb *cb)
2216{
2217	int ret;
2218
2219	ret = hns3_alloc_buffer(ring, cb);
2220	if (ret)
2221		goto out;
2222
2223	ret = hns3_map_buffer(ring, cb);
2224	if (ret)
2225		goto out_with_buf;
2226
2227	return 0;
2228
2229out_with_buf:
2230	hns3_free_buffer(ring, cb);
2231out:
2232	return ret;
2233}
2234
2235static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2236{
2237	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2238
2239	if (ret)
2240		return ret;
2241
2242	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2243
2244	return 0;
2245}
2246
2247	/* Allocate memory for the raw packet buffers and map them for DMA */
2248static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2249{
2250	int i, j, ret;
2251
2252	for (i = 0; i < ring->desc_num; i++) {
2253		ret = hns3_alloc_buffer_attach(ring, i);
2254		if (ret)
2255			goto out_buffer_fail;
2256	}
2257
2258	return 0;
2259
2260out_buffer_fail:
2261	for (j = i - 1; j >= 0; j--)
2262		hns3_free_buffer_detach(ring, j);
2263	return ret;
2264}
2265
2266	/* detach an in-use buffer and replace it with a reserved one */
2267static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2268				struct hns3_desc_cb *res_cb)
2269{
2270	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2271	ring->desc_cb[i] = *res_cb;
2272	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2273	ring->desc[i].rx.bd_base_info = 0;
2274}
2275
2276static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2277{
2278	ring->desc_cb[i].reuse_flag = 0;
2279	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
2280					 ring->desc_cb[i].page_offset);
2281	ring->desc[i].rx.bd_base_info = 0;
2282}
2283
2284static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
2285				  int *bytes, int *pkts)
2286{
2287	int ntc = ring->next_to_clean;
2288	struct hns3_desc_cb *desc_cb;
2289
2290	while (head != ntc) {
2291		desc_cb = &ring->desc_cb[ntc];
2292		(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2293		(*bytes) += desc_cb->length;
2294		/* desc_cb will be cleaned after hns3_free_buffer_detach */
2295		hns3_free_buffer_detach(ring, ntc);
2296
2297		if (++ntc == ring->desc_num)
2298			ntc = 0;
2299
2300		/* Issue prefetch for next Tx descriptor */
2301		prefetch(&ring->desc_cb[ntc]);
2302	}
2303
2304	/* This smp_store_release() pairs with smp_load_acquire() in
2305	 * ring_space called by hns3_nic_net_xmit.
2306	 */
2307	smp_store_release(&ring->next_to_clean, ntc);
2308}
2309
2310static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2311{
2312	int u = ring->next_to_use;
2313	int c = ring->next_to_clean;
2314
2315	if (unlikely(h > ring->desc_num))
2316		return 0;
2317
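	/* head is valid only if it lies in the interval (next_to_clean,
	 * next_to_use], taking ring wraparound into account
	 */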
2318	return u > c ? (h > c && h <= u) : (h > c || h <= u);
2319}
2320
2321void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2322{
2323	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2324	struct hns3_nic_priv *priv = netdev_priv(netdev);
2325	struct netdev_queue *dev_queue;
2326	int bytes, pkts;
2327	int head;
2328
2329	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2330	rmb(); /* Make sure head is ready before touching any data */
2331
2332	if (is_ring_empty(ring) || head == ring->next_to_clean)
2333		return; /* no data to poll */
2334
2335	if (unlikely(!is_valid_clean_head(ring, head))) {
2336		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2337			   ring->next_to_use, ring->next_to_clean);
2338
2339		u64_stats_update_begin(&ring->syncp);
2340		ring->stats.io_err_cnt++;
2341		u64_stats_update_end(&ring->syncp);
2342		return;
2343	}
2344
2345	bytes = 0;
2346	pkts = 0;
2347	hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
2348
2349	ring->tqp_vector->tx_group.total_bytes += bytes;
2350	ring->tqp_vector->tx_group.total_packets += pkts;
2351
2352	u64_stats_update_begin(&ring->syncp);
2353	ring->stats.tx_bytes += bytes;
2354	ring->stats.tx_pkts += pkts;
2355	u64_stats_update_end(&ring->syncp);
2356
2357	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2358	netdev_tx_completed_queue(dev_queue, pkts, bytes);
2359
2360	if (unlikely(pkts && netif_carrier_ok(netdev) &&
2361		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2362		/* Make sure that anybody stopping the queue after this
2363		 * sees the new next_to_clean.
2364		 */
2365		smp_mb();
2366		if (netif_tx_queue_stopped(dev_queue) &&
2367		    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2368			netif_tx_wake_queue(dev_queue);
2369			ring->stats.restart_queue++;
2370		}
2371	}
2372}
2373
2374static int hns3_desc_unused(struct hns3_enet_ring *ring)
2375{
2376	int ntc = ring->next_to_clean;
2377	int ntu = ring->next_to_use;
2378
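	/* count the descriptors between next_to_use and next_to_clean,
	 * i.e. those the driver can refill, handling ring wraparound
	 */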
2379	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2380}
2381
2382static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
2383				      int cleaned_count)
2384{
2385	struct hns3_desc_cb *desc_cb;
2386	struct hns3_desc_cb res_cbs;
2387	int i, ret;
2388
2389	for (i = 0; i < cleaned_count; i++) {
2390		desc_cb = &ring->desc_cb[ring->next_to_use];
2391		if (desc_cb->reuse_flag) {
2392			u64_stats_update_begin(&ring->syncp);
2393			ring->stats.reuse_pg_cnt++;
2394			u64_stats_update_end(&ring->syncp);
2395
2396			hns3_reuse_buffer(ring, ring->next_to_use);
2397		} else {
2398			ret = hns3_reserve_buffer_map(ring, &res_cbs);
2399			if (ret) {
2400				u64_stats_update_begin(&ring->syncp);
2401				ring->stats.sw_err_cnt++;
2402				u64_stats_update_end(&ring->syncp);
2403
2404				hns3_rl_err(ring->tqp_vector->napi.dev,
2405					    "alloc rx buffer failed: %d\n",
2406					    ret);
2407				break;
2408			}
2409			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2410
2411			u64_stats_update_begin(&ring->syncp);
2412			ring->stats.non_reuse_pg++;
2413			u64_stats_update_end(&ring->syncp);
2414		}
2415
2416		ring_ptr_move_fw(ring, next_to_use);
2417	}
2418
2419	wmb(); /* Make sure all buffer writes are complete before updating the HW head */
2420	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2421}
2422
2423static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2424				struct hns3_enet_ring *ring, int pull_len,
2425				struct hns3_desc_cb *desc_cb)
2426{
2427	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2428	int size = le16_to_cpu(desc->rx.size);
2429	u32 truesize = hns3_buf_size(ring);
2430
2431	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2432			size - pull_len, truesize);
2433
2434	/* Avoid re-using remote pages, or pages that the stack is still
2435	 * using when page_offset rolls back to zero; default to not reusing.
2436	 */
2437	if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
2438	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
2439		return;
2440
2441	/* Move the offset to the next buffer-sized chunk of the page */
2442	desc_cb->page_offset += truesize;
2443
2444	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
2445		desc_cb->reuse_flag = 1;
2446		/* Bump the ref count on the page before it is handed to the stack */
2447		get_page(desc_cb->priv);
2448	} else if (page_count(desc_cb->priv) == 1) {
2449		desc_cb->reuse_flag = 1;
2450		desc_cb->page_offset = 0;
2451		get_page(desc_cb->priv);
2452	}
2453}
2454
2455static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
2456{
2457	__be16 type = skb->protocol;
2458	struct tcphdr *th;
2459	int depth = 0;
2460
2461	while (eth_type_vlan(type)) {
2462		struct vlan_hdr *vh;
2463
2464		if ((depth + VLAN_HLEN) > skb_headlen(skb))
2465			return -EFAULT;
2466
2467		vh = (struct vlan_hdr *)(skb->data + depth);
2468		type = vh->h_vlan_encapsulated_proto;
2469		depth += VLAN_HLEN;
2470	}
2471
2472	skb_set_network_header(skb, depth);
2473
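	/* recompute the TCP pseudo-header checksum so the aggregated skb
	 * can be handed up the stack as CHECKSUM_PARTIAL
	 */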
2474	if (type == htons(ETH_P_IP)) {
2475		const struct iphdr *iph = ip_hdr(skb);
2476
2477		depth += sizeof(struct iphdr);
2478		skb_set_transport_header(skb, depth);
2479		th = tcp_hdr(skb);
2480		th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
2481					  iph->daddr, 0);
2482	} else if (type == htons(ETH_P_IPV6)) {
2483		const struct ipv6hdr *iph = ipv6_hdr(skb);
2484
2485		depth += sizeof(struct ipv6hdr);
2486		skb_set_transport_header(skb, depth);
2487		th = tcp_hdr(skb);
2488		th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
2489					  &iph->daddr, 0);
2490	} else {
2491		hns3_rl_err(skb->dev,
2492			    "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
2493			    be16_to_cpu(type), depth);
2494		return -EFAULT;
2495	}
2496
2497	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2498	if (th->cwr)
2499		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2500
2501	if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
2502		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
2503
2504	skb->csum_start = (unsigned char *)th - skb->head;
2505	skb->csum_offset = offsetof(struct tcphdr, check);
2506	skb->ip_summed = CHECKSUM_PARTIAL;
2507	return 0;
2508}
2509
2510static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2511			     u32 l234info, u32 bd_base_info, u32 ol_info)
2512{
2513	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2514	int l3_type, l4_type;
2515	int ol4_type;
2516
2517	skb->ip_summed = CHECKSUM_NONE;
2518
2519	skb_checksum_none_assert(skb);
2520
2521	if (!(netdev->features & NETIF_F_RXCSUM))
2522		return;
2523
2524	/* check if hardware has done checksum */
2525	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2526		return;
2527
2528	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2529				 BIT(HNS3_RXD_OL3E_B) |
2530				 BIT(HNS3_RXD_OL4E_B)))) {
2531		u64_stats_update_begin(&ring->syncp);
2532		ring->stats.l3l4_csum_err++;
2533		u64_stats_update_end(&ring->syncp);
2534
2535		return;
2536	}
2537
2538	ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
2539				   HNS3_RXD_OL4ID_S);
2540	switch (ol4_type) {
2541	case HNS3_OL4_TYPE_MAC_IN_UDP:
2542	case HNS3_OL4_TYPE_NVGRE:
2543		skb->csum_level = 1;
2544		/* fall through */
2545	case HNS3_OL4_TYPE_NO_TUN:
2546		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2547					  HNS3_RXD_L3ID_S);
2548		l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2549					  HNS3_RXD_L4ID_S);
2550
2551		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2552		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2553		     l3_type == HNS3_L3_TYPE_IPV6) &&
2554		    (l4_type == HNS3_L4_TYPE_UDP ||
2555		     l4_type == HNS3_L4_TYPE_TCP ||
2556		     l4_type == HNS3_L4_TYPE_SCTP))
2557			skb->ip_summed = CHECKSUM_UNNECESSARY;
2558		break;
2559	default:
2560		break;
2561	}
2562}
2563
2564static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2565{
2566	if (skb_has_frag_list(skb))
2567		napi_gro_flush(&ring->tqp_vector->napi, false);
2568
2569	napi_gro_receive(&ring->tqp_vector->napi, skb);
2570}
2571
2572static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2573				struct hns3_desc *desc, u32 l234info,
2574				u16 *vlan_tag)
2575{
2576	struct hnae3_handle *handle = ring->tqp->handle;
2577	struct pci_dev *pdev = ring->tqp->handle->pdev;
2578
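	/* for HW revision 0x20, check ot_vlan_tag first and fall back to
	 * vlan_tag when it carries no VID
	 */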
2579	if (pdev->revision == 0x20) {
2580		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2581		if (!(*vlan_tag & VLAN_VID_MASK))
2582			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2583
2584		return (*vlan_tag != 0);
2585	}
2586
2587#define HNS3_STRP_OUTER_VLAN	0x1
2588#define HNS3_STRP_INNER_VLAN	0x2
2589#define HNS3_STRP_BOTH		0x3
2590
2591	/* The hardware always inserts the VLAN tag into the RX descriptor
2592	 * when it strips the tag from the packet, so the driver needs to
2593	 * determine which tag to report to the stack.
2594	 */
2595	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2596				HNS3_RXD_STRP_TAGP_S)) {
2597	case HNS3_STRP_OUTER_VLAN:
2598		if (handle->port_base_vlan_state !=
2599				HNAE3_PORT_BASE_VLAN_DISABLE)
2600			return false;
2601
2602		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2603		return true;
2604	case HNS3_STRP_INNER_VLAN:
2605		if (handle->port_base_vlan_state !=
2606				HNAE3_PORT_BASE_VLAN_DISABLE)
2607			return false;
2608
2609		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2610		return true;
2611	case HNS3_STRP_BOTH:
2612		if (handle->port_base_vlan_state ==
2613				HNAE3_PORT_BASE_VLAN_DISABLE)
2614			*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2615		else
2616			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2617
2618		return true;
2619	default:
2620		return false;
2621	}
2622}
2623
2624static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
2625			  unsigned char *va)
2626{
2627#define HNS3_NEED_ADD_FRAG	1
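/* HNS3_NEED_ADD_FRAG tells the caller that the packet spans more BDs
 * and the remaining fragments still need to be added
 */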
2628	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2629	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2630	struct sk_buff *skb;
2631
2632	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
2633	skb = ring->skb;
2634	if (unlikely(!skb)) {
2635		hns3_rl_err(netdev, "alloc rx skb fail\n");
2636
2637		u64_stats_update_begin(&ring->syncp);
2638		ring->stats.sw_err_cnt++;
2639		u64_stats_update_end(&ring->syncp);
2640
2641		return -ENOMEM;
2642	}
2643
2644	prefetchw(skb->data);
2645
2646	ring->pending_buf = 1;
2647	ring->frag_num = 0;
2648	ring->tail_skb = NULL;
2649	if (length <= HNS3_RX_HEAD_SIZE) {
2650		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2651
2652		/* We can reuse buffer as-is, just make sure it is local */
2653		if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
2654			desc_cb->reuse_flag = 1;
2655		else /* This page cannot be reused so discard it */
2656			put_page(desc_cb->priv);
2657
2658		ring_ptr_move_fw(ring, next_to_clean);
2659		return 0;
2660	}
2661	u64_stats_update_begin(&ring->syncp);
2662	ring->stats.seg_pkt_cnt++;
2663	u64_stats_update_end(&ring->syncp);
2664
2665	ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
2666	__skb_put(skb, ring->pull_len);
2667	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
2668			    desc_cb);
2669	ring_ptr_move_fw(ring, next_to_clean);
2670
2671	return HNS3_NEED_ADD_FRAG;
2672}
2673
2674static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
2675			 struct sk_buff **out_skb, bool pending)
2676{
2677	struct sk_buff *skb = *out_skb;
2678	struct sk_buff *head_skb = *out_skb;
2679	struct sk_buff *new_skb;
2680	struct hns3_desc_cb *desc_cb;
2681	struct hns3_desc *pre_desc;
2682	u32 bd_base_info;
2683	int pre_bd;
2684
2685	/* if there is a pending BD, next_to_clean has already moved on to
2686	 * the next descriptor, so take bd_base_info from the previous BD
2687	 */
2688	if (pending) {
2689		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
2690			 ring->desc_num;
2691		pre_desc = &ring->desc[pre_bd];
2692		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
2693	} else {
2694		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2695	}
2696
2697	while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2698		desc = &ring->desc[ring->next_to_clean];
2699		desc_cb = &ring->desc_cb[ring->next_to_clean];
2700		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2701		/* make sure the HW has finished writing the descriptor */
2702		dma_rmb();
2703		if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
2704			return -ENXIO;
2705
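		/* an skb can hold at most MAX_SKB_FRAGS page fragments; chain
		 * any further BDs through a frag_list skb
		 */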
2706		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
2707			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2708						 HNS3_RX_HEAD_SIZE);
2709			if (unlikely(!new_skb)) {
2710				hns3_rl_err(ring->tqp_vector->napi.dev,
2711					    "alloc rx fraglist skb fail\n");
2712				return -ENXIO;
2713			}
2714			ring->frag_num = 0;
2715
2716			if (ring->tail_skb) {
2717				ring->tail_skb->next = new_skb;
2718				ring->tail_skb = new_skb;
2719			} else {
2720				skb_shinfo(skb)->frag_list = new_skb;
2721				ring->tail_skb = new_skb;
2722			}
2723		}
2724
2725		if (ring->tail_skb) {
2726			head_skb->truesize += hns3_buf_size(ring);
2727			head_skb->data_len += le16_to_cpu(desc->rx.size);
2728			head_skb->len += le16_to_cpu(desc->rx.size);
2729			skb = ring->tail_skb;
2730		}
2731
2732		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
2733		ring_ptr_move_fw(ring, next_to_clean);
2734		ring->pending_buf++;
2735	}
2736
2737	return 0;
2738}
2739
2740static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
2741				     struct sk_buff *skb, u32 l234info,
2742				     u32 bd_base_info, u32 ol_info)
2743{
2744	u32 l3_type;
2745
2746	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
2747						    HNS3_RXD_GRO_SIZE_M,
2748						    HNS3_RXD_GRO_SIZE_S);
2749	/* if there is no HW GRO, do not set gro params */
2750	if (!skb_shinfo(skb)->gso_size) {
2751		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
2752		return 0;
2753	}
2754
2755	NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
2756						  HNS3_RXD_GRO_COUNT_M,
2757						  HNS3_RXD_GRO_COUNT_S);
2758
2759	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
2760	if (l3_type == HNS3_L3_TYPE_IPV4)
2761		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2762	else if (l3_type == HNS3_L3_TYPE_IPV6)
2763		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2764	else
2765		return -EFAULT;
2766
2767	return hns3_gro_complete(skb, l234info);
2768}
2769
2770static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2771				     struct sk_buff *skb, u32 rss_hash)
2772{
2773	struct hnae3_handle *handle = ring->tqp->handle;
2774	enum pkt_hash_types rss_type;
2775
2776	if (rss_hash)
2777		rss_type = handle->kinfo.rss_type;
2778	else
2779		rss_type = PKT_HASH_TYPE_NONE;
2780
2781	skb_set_hash(skb, rss_hash, rss_type);
2782}
2783
2784static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
2785{
2786	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2787	enum hns3_pkt_l2t_type l2_frame_type;
2788	u32 bd_base_info, l234info, ol_info;
2789	struct hns3_desc *desc;
2790	unsigned int len;
2791	int pre_ntc, ret;
2792
2793	/* bdinfo handled below is only valid on the last BD of the
2794	 * current packet, and ring->next_to_clean indicates the first
2795	 * descriptor of the next packet, so we need the -1 below.
2796	 */
2797	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
2798					(ring->desc_num - 1);
2799	desc = &ring->desc[pre_ntc];
2800	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2801	l234info = le32_to_cpu(desc->rx.l234_info);
2802	ol_info = le32_to_cpu(desc->rx.ol_info);
2803
2804	/* Based on the hw strategy, the offloaded tag is stored in
2805	 * ot_vlan_tag in the two-layer tag case, and in vlan_tag in
2806	 * the one-layer tag case.
2807	 */
2808	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2809		u16 vlan_tag;
2810
2811		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2812			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2813					       vlan_tag);
2814	}
2815
2816	if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
2817				  BIT(HNS3_RXD_L2E_B))))) {
2818		u64_stats_update_begin(&ring->syncp);
2819		if (l234info & BIT(HNS3_RXD_L2E_B))
2820			ring->stats.l2_err++;
2821		else
2822			ring->stats.err_pkt_len++;
2823		u64_stats_update_end(&ring->syncp);
2824
2825		return -EFAULT;
2826	}
2827
2828	len = skb->len;
2829
2830	/* Set skb->protocol before handing the skb up the IP stack */
2831	skb->protocol = eth_type_trans(skb, netdev);
2832
2833	/* This is needed in order to enable forwarding support */
2834	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
2835					bd_base_info, ol_info);
2836	if (unlikely(ret)) {
2837		u64_stats_update_begin(&ring->syncp);
2838		ring->stats.rx_err_cnt++;
2839		u64_stats_update_end(&ring->syncp);
2840		return ret;
2841	}
2842
2843	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
2844					HNS3_RXD_DMAC_S);
2845
2846	u64_stats_update_begin(&ring->syncp);
2847	ring->stats.rx_pkts++;
2848	ring->stats.rx_bytes += len;
2849
2850	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
2851		ring->stats.rx_multicast++;
2852
2853	u64_stats_update_end(&ring->syncp);
2854
2855	ring->tqp_vector->rx_group.total_bytes += len;
2856
2857	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
2858	return 0;
2859}
2860
2861static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2862			     struct sk_buff **out_skb)
2863{
2864	struct sk_buff *skb = ring->skb;
2865	struct hns3_desc_cb *desc_cb;
2866	struct hns3_desc *desc;
2867	unsigned int length;
2868	u32 bd_base_info;
2869	int ret;
2870
2871	desc = &ring->desc[ring->next_to_clean];
2872	desc_cb = &ring->desc_cb[ring->next_to_clean];
2873
2874	prefetch(desc);
2875
2876	length = le16_to_cpu(desc->rx.size);
2877	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2878
2879	/* Check valid BD */
2880	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2881		return -ENXIO;
2882
2883	if (!skb)
2884		ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2885
2886	/* Prefetch the first cache line of the first page.
2887	 * The idea is to cache a few bytes of the packet header. Our L1
2888	 * cache line size is 64B, so we need to prefetch twice to cover
2889	 * 128B. On CPUs with 128B L1 cache lines, a single prefetch
2890	 * would suffice to cache the relevant part of the header.
2891	 */
2892
2893	prefetch(ring->va);
2894#if L1_CACHE_BYTES < 128
2895	prefetch(ring->va + L1_CACHE_BYTES);
2896#endif
2897
2898	if (!skb) {
2899		ret = hns3_alloc_skb(ring, length, ring->va);
2900		*out_skb = skb = ring->skb;
2901
2902		if (ret < 0) /* alloc buffer fail */
2903			return ret;
2904		if (ret > 0) { /* need add frag */
2905			ret = hns3_add_frag(ring, desc, &skb, false);
2906			if (ret)
2907				return ret;
2908
2909			/* As the head data may change when GRO is enabled, copy
2910			 * the head data in after the rest of the data is received
2911			 */
2912			memcpy(skb->data, ring->va,
2913			       ALIGN(ring->pull_len, sizeof(long)));
2914		}
2915	} else {
2916		ret = hns3_add_frag(ring, desc, &skb, true);
2917		if (ret)
2918			return ret;
2919
2920		/* As the head data may change when GRO is enabled, copy
2921		 * the head data in after the rest of the data is received
2922		 */
2923		memcpy(skb->data, ring->va,
2924		       ALIGN(ring->pull_len, sizeof(long)));
2925	}
2926
2927	ret = hns3_handle_bdinfo(ring, skb);
2928	if (unlikely(ret)) {
2929		dev_kfree_skb_any(skb);
2930		return ret;
2931	}
2932
2933	skb_record_rx_queue(skb, ring->tqp->tqp_index);
2934	*out_skb = skb;
2935
2936	return 0;
2937}
2938
2939int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2940		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2941{
2942#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
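/* RX buffers are refilled in batches of at least 16 descriptors; each
 * batch ends with a single write of the ring head register
 */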
2943	int unused_count = hns3_desc_unused(ring);
2944	struct sk_buff *skb = ring->skb;
2945	int recv_pkts = 0;
2946	int recv_bds = 0;
2947	int err, num;
2948
2949	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2950	rmb(); /* Make sure num has taken effect before other data is touched */
2951
2952	num -= unused_count;
2953	unused_count -= ring->pending_buf;
2954
2955	while (recv_pkts < budget && recv_bds < num) {
2956		/* Reuse or realloc buffers */
2957		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2958			hns3_nic_alloc_rx_buffers(ring, unused_count);
2959			unused_count = hns3_desc_unused(ring) -
2960					ring->pending_buf;
2961		}
2962
2963		/* Poll one pkt */
2964		err = hns3_handle_rx_bd(ring, &skb);
2965		if (unlikely(!skb)) /* This fault cannot be repaired */
2966			goto out;
2967
2968		if (err == -ENXIO) { /* FE bit for the packet not received yet */
2969			goto out;
2970		} else if (unlikely(err)) { /* skip this erroneous packet */
2971			recv_bds += ring->pending_buf;
2972			unused_count += ring->pending_buf;
2973			ring->skb = NULL;
2974			ring->pending_buf = 0;
2975			continue;
2976		}
2977
2978		rx_fn(ring, skb);
2979		recv_bds += ring->pending_buf;
2980		unused_count += ring->pending_buf;
2981		ring->skb = NULL;
2982		ring->pending_buf = 0;
2983
2984		recv_pkts++;
2985	}
2986
2987out:
2988	/* refill the ring with any remaining unused buffers before returning */
2989	if (unused_count > 0)
2990		hns3_nic_alloc_rx_buffers(ring, unused_count);
2991
2992	return recv_pkts;
2993}
2994
2995static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
2996{
2997#define HNS3_RX_LOW_BYTE_RATE 10000
2998#define HNS3_RX_MID_BYTE_RATE 20000
2999#define HNS3_RX_ULTRA_PACKET_RATE 40
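/* the byte rates above are in bytes per millisecond (10000 B/ms is 10 MB/s);
 * the packet rate is in packets per millisecond
 */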
3000
3001	enum hns3_flow_level_range new_flow_level;
3002	struct hns3_enet_tqp_vector *tqp_vector;
3003	int packets_per_msecs, bytes_per_msecs;
3004	u32 time_passed_ms;
3005
3006	tqp_vector = ring_group->ring->tqp_vector;
3007	time_passed_ms =
3008		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
3009	if (!time_passed_ms)
3010		return false;
3011
3012	do_div(ring_group->total_packets, time_passed_ms);
3013	packets_per_msecs = ring_group->total_packets;
3014
3015	do_div(ring_group->total_bytes, time_passed_ms);
3016	bytes_per_msecs = ring_group->total_bytes;
3017
3018	new_flow_level = ring_group->coal.flow_level;
3019
3020	/* Simple throttle rate management
3021	 * 0-10MB/s    low       (50000 ints/s)
3022	 * 10-20MB/s   middle    (20000 ints/s)
3023	 * 20-1249MB/s high      (18000 ints/s)
3024	 * > 40000pps  ultra     (8000 ints/s)
3025	 */
3026	switch (new_flow_level) {
3027	case HNS3_FLOW_LOW:
3028		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
3029			new_flow_level = HNS3_FLOW_MID;
3030		break;
3031	case HNS3_FLOW_MID:
3032		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
3033			new_flow_level = HNS3_FLOW_HIGH;
3034		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
3035			new_flow_level = HNS3_FLOW_LOW;
3036		break;
3037	case HNS3_FLOW_HIGH:
3038	case HNS3_FLOW_ULTRA:
3039	default:
3040		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
3041			new_flow_level = HNS3_FLOW_MID;
3042		break;
3043	}
3044
3045	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
3046	    &tqp_vector->rx_group == ring_group)
3047		new_flow_level = HNS3_FLOW_ULTRA;
3048
3049	ring_group->total_bytes = 0;
3050	ring_group->total_packets = 0;
3051	ring_group->coal.flow_level = new_flow_level;
3052
3053	return true;
3054}
3055
3056static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
3057{
3058	struct hns3_enet_tqp_vector *tqp_vector;
3059	u16 new_int_gl;
3060
3061	if (!ring_group->ring)
3062		return false;
3063
3064	tqp_vector = ring_group->ring->tqp_vector;
3065	if (!tqp_vector->last_jiffies)
3066		return false;
3067
3068	if (ring_group->total_packets == 0) {
3069		ring_group->coal.int_gl = HNS3_INT_GL_50K;
3070		ring_group->coal.flow_level = HNS3_FLOW_LOW;
3071		return true;
3072	}
3073
3074	if (!hns3_get_new_flow_lvl(ring_group))
3075		return false;
3076
3077	new_int_gl = ring_group->coal.int_gl;
3078	switch (ring_group->coal.flow_level) {
3079	case HNS3_FLOW_LOW:
3080		new_int_gl = HNS3_INT_GL_50K;
3081		break;
3082	case HNS3_FLOW_MID:
3083		new_int_gl = HNS3_INT_GL_20K;
3084		break;
3085	case HNS3_FLOW_HIGH:
3086		new_int_gl = HNS3_INT_GL_18K;
3087		break;
3088	case HNS3_FLOW_ULTRA:
3089		new_int_gl = HNS3_INT_GL_8K;
3090		break;
3091	default:
3092		break;
3093	}
3094
3095	if (new_int_gl != ring_group->coal.int_gl) {
3096		ring_group->coal.int_gl = new_int_gl;
3097		return true;
3098	}
3099	return false;
3100}
3101
3102static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
3103{
3104	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
3105	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
3106	bool rx_update, tx_update;
3107
3108	/* update param every 1000ms */
3109	if (time_before(jiffies,
3110			tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
3111		return;
3112
3113	if (rx_group->coal.gl_adapt_enable) {
3114		rx_update = hns3_get_new_int_gl(rx_group);
3115		if (rx_update)
3116			hns3_set_vector_coalesce_rx_gl(tqp_vector,
3117						       rx_group->coal.int_gl);
3118	}
3119
3120	if (tx_group->coal.gl_adapt_enable) {
3121		tx_update = hns3_get_new_int_gl(tx_group);
3122		if (tx_update)
3123			hns3_set_vector_coalesce_tx_gl(tqp_vector,
3124						       tx_group->coal.int_gl);
3125	}
3126
3127	tqp_vector->last_jiffies = jiffies;
3128}
3129
3130static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
3131{
3132	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
3133	struct hns3_enet_ring *ring;
3134	int rx_pkt_total = 0;
3135
3136	struct hns3_enet_tqp_vector *tqp_vector =
3137		container_of(napi, struct hns3_enet_tqp_vector, napi);
3138	bool clean_complete = true;
3139	int rx_budget = budget;
3140
3141	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3142		napi_complete(napi);
3143		return 0;
3144	}
3145
3146	/* Since the actual Tx work is minimal, we can give the Tx a larger
3147	 * budget and be more aggressive about cleaning up the Tx descriptors.
3148	 */
3149	hns3_for_each_ring(ring, tqp_vector->tx_group)
3150		hns3_clean_tx_ring(ring);
3151
3152	/* make sure the rx ring budget is not smaller than 1 */
3153	if (tqp_vector->num_tqps > 1)
3154		rx_budget = max(budget / tqp_vector->num_tqps, 1);
3155
3156	hns3_for_each_ring(ring, tqp_vector->rx_group) {
3157		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
3158						    hns3_rx_skb);
3159
3160		if (rx_cleaned >= rx_budget)
3161			clean_complete = false;
3162
3163		rx_pkt_total += rx_cleaned;
3164	}
3165
3166	tqp_vector->rx_group.total_packets += rx_pkt_total;
3167
3168	if (!clean_complete)
3169		return budget;
3170
3171	if (napi_complete(napi) &&
3172	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3173		hns3_update_new_int_gl(tqp_vector);
3174		hns3_mask_vector_irq(tqp_vector, 1);
3175	}
3176
3177	return rx_pkt_total;
3178}
3179
3180static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3181				      struct hnae3_ring_chain_node *head)
3182{
3183	struct pci_dev *pdev = tqp_vector->handle->pdev;
3184	struct hnae3_ring_chain_node *cur_chain = head;
3185	struct hnae3_ring_chain_node *chain;
3186	struct hns3_enet_ring *tx_ring;
3187	struct hns3_enet_ring *rx_ring;
3188
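	/* build a chain node for every TX ring on this vector, then append
	 * one for every RX ring; the chain describes the ring-to-vector
	 * mapping handed to the AE layer
	 */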
3189	tx_ring = tqp_vector->tx_group.ring;
3190	if (tx_ring) {
3191		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
3192		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3193			      HNAE3_RING_TYPE_TX);
3194		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3195				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
3196
3197		cur_chain->next = NULL;
3198
3199		while (tx_ring->next) {
3200			tx_ring = tx_ring->next;
3201
3202			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
3203					     GFP_KERNEL);
3204			if (!chain)
3205				goto err_free_chain;
3206
3207			cur_chain->next = chain;
3208			chain->tqp_index = tx_ring->tqp->tqp_index;
3209			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3210				      HNAE3_RING_TYPE_TX);
3211			hnae3_set_field(chain->int_gl_idx,
3212					HNAE3_RING_GL_IDX_M,
3213					HNAE3_RING_GL_IDX_S,
3214					HNAE3_RING_GL_TX);
3215
3216			cur_chain = chain;
3217		}
3218	}
3219
3220	rx_ring = tqp_vector->rx_group.ring;
3221	if (!tx_ring && rx_ring) {
3222		cur_chain->next = NULL;
3223		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
3224		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3225			      HNAE3_RING_TYPE_RX);
3226		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3227				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3228
3229		rx_ring = rx_ring->next;
3230	}
3231
3232	while (rx_ring) {
3233		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
3234		if (!chain)
3235			goto err_free_chain;
3236
3237		cur_chain->next = chain;
3238		chain->tqp_index = rx_ring->tqp->tqp_index;
3239		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3240			      HNAE3_RING_TYPE_RX);
3241		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3242				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3243
3244		cur_chain = chain;
3245
3246		rx_ring = rx_ring->next;
3247	}
3248
3249	return 0;
3250
3251err_free_chain:
3252	cur_chain = head->next;
3253	while (cur_chain) {
3254		chain = cur_chain->next;
3255		devm_kfree(&pdev->dev, cur_chain);
3256		cur_chain = chain;
3257	}
3258	head->next = NULL;
3259
3260	return -ENOMEM;
3261}
3262
3263static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3264					struct hnae3_ring_chain_node *head)
3265{
3266	struct pci_dev *pdev = tqp_vector->handle->pdev;
3267	struct hnae3_ring_chain_node *chain_tmp, *chain;
3268
3269	chain = head->next;
3270
3271	while (chain) {
3272		chain_tmp = chain->next;
3273		devm_kfree(&pdev->dev, chain);
3274		chain = chain_tmp;
3275	}
3276}
3277
3278static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
3279				   struct hns3_enet_ring *ring)
3280{
3281	ring->next = group->ring;
3282	group->ring = ring;
3283
3284	group->count++;
3285}
3286
3287static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
3288{
3289	struct pci_dev *pdev = priv->ae_handle->pdev;
3290	struct hns3_enet_tqp_vector *tqp_vector;
3291	int num_vectors = priv->vector_num;
3292	int numa_node;
3293	int vector_i;
3294
3295	numa_node = dev_to_node(&pdev->dev);
3296
3297	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
3298		tqp_vector = &priv->tqp_vector[vector_i];
3299		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
3300				&tqp_vector->affinity_mask);
3301	}
3302}
3303
3304static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
3305{
3306	struct hnae3_ring_chain_node vector_ring_chain;
3307	struct hnae3_handle *h = priv->ae_handle;
3308	struct hns3_enet_tqp_vector *tqp_vector;
3309	int ret = 0;
3310	int i;
3311
3312	hns3_nic_set_cpumask(priv);
3313
3314	for (i = 0; i < priv->vector_num; i++) {
3315		tqp_vector = &priv->tqp_vector[i];
3316		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
3317		tqp_vector->num_tqps = 0;
3318	}
3319
3320	for (i = 0; i < h->kinfo.num_tqps; i++) {
3321		u16 vector_i = i % priv->vector_num;
3322		u16 tqp_num = h->kinfo.num_tqps;
3323
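		/* spread the TQPs over the vectors round-robin; the TX ring at
		 * index i and the RX ring at index i + tqp_num share a vector
		 */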
3324		tqp_vector = &priv->tqp_vector[vector_i];
3325
3326		hns3_add_ring_to_group(&tqp_vector->tx_group,
3327				       priv->ring_data[i].ring);
3328
3329		hns3_add_ring_to_group(&tqp_vector->rx_group,
3330				       priv->ring_data[i + tqp_num].ring);
3331
3332		priv->ring_data[i].ring->tqp_vector = tqp_vector;
3333		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
3334		tqp_vector->num_tqps++;
3335	}
3336
3337	for (i = 0; i < priv->vector_num; i++) {
3338		tqp_vector = &priv->tqp_vector[i];
3339
3340		tqp_vector->rx_group.total_bytes = 0;
3341		tqp_vector->rx_group.total_packets = 0;
3342		tqp_vector->tx_group.total_bytes = 0;
3343		tqp_vector->tx_group.total_packets = 0;
3344		tqp_vector->handle = h;
3345
3346		ret = hns3_get_vector_ring_chain(tqp_vector,
3347						 &vector_ring_chain);
3348		if (ret)
3349			goto map_ring_fail;
3350
3351		ret = h->ae_algo->ops->map_ring_to_vector(h,
3352			tqp_vector->vector_irq, &vector_ring_chain);
3353
3354		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3355
3356		if (ret)
3357			goto map_ring_fail;
3358
3359		netif_napi_add(priv->netdev, &tqp_vector->napi,
3360			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
3361	}
3362
3363	return 0;
3364
3365map_ring_fail:
3366	while (i--)
3367		netif_napi_del(&priv->tqp_vector[i].napi);
3368
3369	return ret;
3370}
3371
3372static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
3373{
3374#define HNS3_VECTOR_PF_MAX_NUM		64
3375
3376	struct hnae3_handle *h = priv->ae_handle;
3377	struct hns3_enet_tqp_vector *tqp_vector;
3378	struct hnae3_vector_info *vector;
3379	struct pci_dev *pdev = h->pdev;
3380	u16 tqp_num = h->kinfo.num_tqps;
3381	u16 vector_num;
3382	int ret = 0;
3383	u16 i;
3384
3385	/* RSS size, the number of online CPUs and vector_num should match */
3386	/* Should consider 2P/4P systems later */
3387	vector_num = min_t(u16, num_online_cpus(), tqp_num);
3388	vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
3389
3390	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
3391			      GFP_KERNEL);
3392	if (!vector)
3393		return -ENOMEM;
3394
3395	/* save the actual available vector number */
3396	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
3397
3398	priv->vector_num = vector_num;
3399	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
3400		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
3401			     GFP_KERNEL);
3402	if (!priv->tqp_vector) {
3403		ret = -ENOMEM;
3404		goto out;
3405	}
3406
3407	for (i = 0; i < priv->vector_num; i++) {
3408		tqp_vector = &priv->tqp_vector[i];
3409		tqp_vector->idx = i;
3410		tqp_vector->mask_addr = vector[i].io_addr;
3411		tqp_vector->vector_irq = vector[i].vector;
3412		hns3_vector_gl_rl_init(tqp_vector, priv);
3413	}
3414
3415out:
3416	devm_kfree(&pdev->dev, vector);
3417	return ret;
3418}
3419
3420static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
3421{
3422	group->ring = NULL;
3423	group->count = 0;
3424}
3425
3426static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
3427{
3428	struct hnae3_ring_chain_node vector_ring_chain;
3429	struct hnae3_handle *h = priv->ae_handle;
3430	struct hns3_enet_tqp_vector *tqp_vector;
3431	int i;
3432
3433	for (i = 0; i < priv->vector_num; i++) {
3434		tqp_vector = &priv->tqp_vector[i];
3435
3436		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
3437			continue;
3438
3439		hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
3440
3441		h->ae_algo->ops->unmap_ring_from_vector(h,
3442			tqp_vector->vector_irq, &vector_ring_chain);
3443
3444		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3445
3446		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
3447			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
3448			free_irq(tqp_vector->vector_irq, tqp_vector);
3449			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
3450		}
3451
3452		hns3_clear_ring_group(&tqp_vector->rx_group);
3453		hns3_clear_ring_group(&tqp_vector->tx_group);
3454		netif_napi_del(&priv->tqp_vector[i].napi);
3455	}
3456}
3457
3458static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
3459{
3460	struct hnae3_handle *h = priv->ae_handle;
3461	struct pci_dev *pdev = h->pdev;
3462	int i, ret;
3463
3464	for (i = 0; i < priv->vector_num; i++) {
3465		struct hns3_enet_tqp_vector *tqp_vector;
3466
3467		tqp_vector = &priv->tqp_vector[i];
3468		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
3469		if (ret)
3470			return ret;
3471	}
3472
3473	devm_kfree(&pdev->dev, priv->tqp_vector);
3474	return 0;
3475}
3476
3477static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
3478			     unsigned int ring_type)
3479{
3480	struct hns3_nic_ring_data *ring_data = priv->ring_data;
3481	int queue_num = priv->ae_handle->kinfo.num_tqps;
3482	struct pci_dev *pdev = priv->ae_handle->pdev;
3483	struct hns3_enet_ring *ring;
3484	int desc_num;
3485
3486	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3487	if (!ring)
3488		return -ENOMEM;
3489
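	/* TX rings occupy ring_data[0..num_tqps - 1] and RX rings occupy
	 * ring_data[num_tqps..2 * num_tqps - 1]
	 */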
3490	if (ring_type == HNAE3_RING_TYPE_TX) {
3491		desc_num = priv->ae_handle->kinfo.num_tx_desc;
3492		ring_data[q->tqp_index].ring = ring;
3493		ring_data[q->tqp_index].queue_index = q->tqp_index;
3494		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3495	} else {
3496		desc_num = priv->ae_handle->kinfo.num_rx_desc;
3497		ring_data[q->tqp_index + queue_num].ring = ring;
3498		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
3499		ring->io_base = q->io_base;
3500	}
3501
3502	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
3503
3504	ring->tqp = q;
3505	ring->desc = NULL;
3506	ring->desc_cb = NULL;
3507	ring->dev = priv->dev;
3508	ring->desc_dma_addr = 0;
3509	ring->buf_size = q->buf_size;
3510	ring->desc_num = desc_num;
3511	ring->next_to_use = 0;
3512	ring->next_to_clean = 0;
3513
3514	return 0;
3515}
3516
3517static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3518			      struct hns3_nic_priv *priv)
3519{
3520	int ret;
3521
3522	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3523	if (ret)
3524		return ret;
3525
3526	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3527	if (ret) {
3528		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3529		return ret;
3530	}
3531
3532	return 0;
3533}
3534
3535static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3536{
3537	struct hnae3_handle *h = priv->ae_handle;
3538	struct pci_dev *pdev = h->pdev;
3539	int i, ret;
3540
3541	priv->ring_data =  devm_kzalloc(&pdev->dev,
3542					array3_size(h->kinfo.num_tqps,
3543						    sizeof(*priv->ring_data),
3544						    2),
3545					GFP_KERNEL);
3546	if (!priv->ring_data)
3547		return -ENOMEM;
3548
3549	for (i = 0; i < h->kinfo.num_tqps; i++) {
3550		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3551		if (ret)
3552			goto err;
3553	}
3554
3555	return 0;
3556err:
3557	while (i--) {
3558		devm_kfree(priv->dev, priv->ring_data[i].ring);
3559		devm_kfree(priv->dev,
3560			   priv->ring_data[i + h->kinfo.num_tqps].ring);
3561	}
3562
3563	devm_kfree(&pdev->dev, priv->ring_data);
3564	priv->ring_data = NULL;
3565	return ret;
3566}
3567
3568static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3569{
3570	struct hnae3_handle *h = priv->ae_handle;
3571	int i;
3572
3573	if (!priv->ring_data)
3574		return;
3575
3576	for (i = 0; i < h->kinfo.num_tqps; i++) {
3577		devm_kfree(priv->dev, priv->ring_data[i].ring);
3578		devm_kfree(priv->dev,
3579			   priv->ring_data[i + h->kinfo.num_tqps].ring);
3580	}
3581	devm_kfree(priv->dev, priv->ring_data);
3582	priv->ring_data = NULL;
3583}
3584
3585static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3586{
3587	int ret;
3588
3589	if (ring->desc_num <= 0 || ring->buf_size <= 0)
3590		return -EINVAL;
3591
3592	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
3593				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
3594	if (!ring->desc_cb) {
3595		ret = -ENOMEM;
3596		goto out;
3597	}
3598
3599	ret = hns3_alloc_desc(ring);
3600	if (ret)
3601		goto out_with_desc_cb;
3602
3603	if (!HNAE3_IS_TX_RING(ring)) {
3604		ret = hns3_alloc_ring_buffers(ring);
3605		if (ret)
3606			goto out_with_desc;
3607	}
3608
3609	return 0;
3610
3611out_with_desc:
3612	hns3_free_desc(ring);
3613out_with_desc_cb:
3614	devm_kfree(ring_to_dev(ring), ring->desc_cb);
3615	ring->desc_cb = NULL;
3616out:
3617	return ret;
3618}
3619
3620void hns3_fini_ring(struct hns3_enet_ring *ring)
3621{
3622	hns3_free_desc(ring);
3623	devm_kfree(ring_to_dev(ring), ring->desc_cb);
3624	ring->desc_cb = NULL;
3625	ring->next_to_clean = 0;
3626	ring->next_to_use = 0;
3627	ring->pending_buf = 0;
3628	if (ring->skb) {
3629		dev_kfree_skb_any(ring->skb);
3630		ring->skb = NULL;
3631	}
3632}
3633
3634static int hns3_buf_size2type(u32 buf_size)
3635{
3636	int bd_size_type;
3637
3638	switch (buf_size) {
3639	case 512:
3640		bd_size_type = HNS3_BD_SIZE_512_TYPE;
3641		break;
3642	case 1024:
3643		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3644		break;
3645	case 2048:
3646		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3647		break;
3648	case 4096:
3649		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3650		break;
3651	default:
3652		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3653	}
3654
3655	return bd_size_type;
3656}
3657
3658static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3659{
3660	dma_addr_t dma = ring->desc_dma_addr;
3661	struct hnae3_queue *q = ring->tqp;
3662
3663	if (!HNAE3_IS_TX_RING(ring)) {
3664		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
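		/* program the high 32 bits of the base address; the shift is
		 * split so it stays well defined when dma_addr_t is 32 bits
		 */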
3665		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3666			       (u32)((dma >> 31) >> 1));
3667
3668		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3669			       hns3_buf_size2type(ring->buf_size));
3670		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3671			       ring->desc_num / 8 - 1);
3672
3673	} else {
3674		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3675			       (u32)dma);
3676		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3677			       (u32)((dma >> 31) >> 1));
3678
3679		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3680			       ring->desc_num / 8 - 1);
3681	}
3682}
3683
3684static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3685{
3686	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3687	int i;
3688
3689	for (i = 0; i < HNAE3_MAX_TC; i++) {
3690		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3691		int j;
3692
3693		if (!tc_info->enable)
3694			continue;
3695
3696		for (j = 0; j < tc_info->tqp_count; j++) {
3697			struct hnae3_queue *q;
3698
3699			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3700			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3701				       tc_info->tc);
3702		}
3703	}
3704}
3705
3706int hns3_init_all_ring(struct hns3_nic_priv *priv)
3707{
3708	struct hnae3_handle *h = priv->ae_handle;
3709	int ring_num = h->kinfo.num_tqps * 2;
3710	int i, j;
3711	int ret;
3712
3713	for (i = 0; i < ring_num; i++) {
3714		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3715		if (ret) {
3716			dev_err(priv->dev,
3717				"Alloc ring memory fail! ret=%d\n", ret);
3718			goto out_when_alloc_ring_memory;
3719		}
3720
3721		u64_stats_init(&priv->ring_data[i].ring->syncp);
3722	}
3723
3724	return 0;
3725
3726out_when_alloc_ring_memory:
3727	for (j = i - 1; j >= 0; j--)
3728		hns3_fini_ring(priv->ring_data[j].ring);
3729
3730	return -ENOMEM;
3731}
3732
3733int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3734{
3735	struct hnae3_handle *h = priv->ae_handle;
3736	int i;
3737
3738	for (i = 0; i < h->kinfo.num_tqps; i++) {
3739		hns3_fini_ring(priv->ring_data[i].ring);
3740		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3741	}
3742	return 0;
3743}
3744
3745	/* Set the MAC address if it is configured, or leave it to the AE driver */
3746static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3747{
3748	struct hns3_nic_priv *priv = netdev_priv(netdev);
3749	struct hnae3_handle *h = priv->ae_handle;
3750	u8 mac_addr_temp[ETH_ALEN];
3751	int ret = 0;
3752
3753	if (h->ae_algo->ops->get_mac_addr && init) {
3754		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3755		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3756	}
3757
3758	/* Check if the MAC address is valid; if not, get a random one */
3759	if (!is_valid_ether_addr(netdev->dev_addr)) {
3760		eth_hw_addr_random(netdev);
3761		dev_warn(priv->dev, "using random MAC address %pM\n",
3762			 netdev->dev_addr);
3763	}
3764
3765	if (h->ae_algo->ops->set_mac_addr)
3766		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3767
3768	return ret;
3769}
3770
3771static int hns3_init_phy(struct net_device *netdev)
3772{
3773	struct hnae3_handle *h = hns3_get_handle(netdev);
3774	int ret = 0;
3775
3776	if (h->ae_algo->ops->mac_connect_phy)
3777		ret = h->ae_algo->ops->mac_connect_phy(h);
3778
3779	return ret;
3780}
3781
3782static void hns3_uninit_phy(struct net_device *netdev)
3783{
3784	struct hnae3_handle *h = hns3_get_handle(netdev);
3785
3786	if (h->ae_algo->ops->mac_disconnect_phy)
3787		h->ae_algo->ops->mac_disconnect_phy(h);
3788}
3789
3790static int hns3_restore_fd_rules(struct net_device *netdev)
3791{
3792	struct hnae3_handle *h = hns3_get_handle(netdev);
3793	int ret = 0;
3794
3795	if (h->ae_algo->ops->restore_fd_rules)
3796		ret = h->ae_algo->ops->restore_fd_rules(h);
3797
3798	return ret;
3799}
3800
3801static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3802{
3803	struct hnae3_handle *h = hns3_get_handle(netdev);
3804
3805	if (h->ae_algo->ops->del_all_fd_entries)
3806		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3807}
3808
3809static int hns3_client_start(struct hnae3_handle *handle)
3810{
3811	if (!handle->ae_algo->ops->client_start)
3812		return 0;
3813
3814	return handle->ae_algo->ops->client_start(handle);
3815}
3816
3817static void hns3_client_stop(struct hnae3_handle *handle)
3818{
3819	if (!handle->ae_algo->ops->client_stop)
3820		return;
3821
3822	handle->ae_algo->ops->client_stop(handle);
3823}
3824
3825static void hns3_info_show(struct hns3_nic_priv *priv)
3826{
3827	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3828
3829	dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
3830	dev_info(priv->dev, "Number of task queue pairs: %d\n", kinfo->num_tqps);
3831	dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
3832	dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
3833	dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
3834	dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
3835	dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
3836	dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
3837	dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
3838}
3839
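/* Client init: allocate and set up the netdev, rings, vectors and PHY,
 * register the netdev and start the client; unwind in reverse order on
 * failure.
 */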
3840static int hns3_client_init(struct hnae3_handle *handle)
3841{
3842	struct pci_dev *pdev = handle->pdev;
3843	u16 alloc_tqps, max_rss_size;
3844	struct hns3_nic_priv *priv;
3845	struct net_device *netdev;
3846	int ret;
3847
3848	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3849						    &max_rss_size);
3850	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3851	if (!netdev)
3852		return -ENOMEM;
3853
3854	priv = netdev_priv(netdev);
3855	priv->dev = &pdev->dev;
3856	priv->netdev = netdev;
3857	priv->ae_handle = handle;
3858	priv->tx_timeout_count = 0;
3859	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
3860
3861	handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
3862
3863	handle->kinfo.netdev = netdev;
3864	handle->priv = (void *)priv;
3865
3866	hns3_init_mac_addr(netdev, true);
3867
3868	hns3_set_default_feature(netdev);
3869
3870	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3871	netdev->priv_flags |= IFF_UNICAST_FLT;
3872	netdev->netdev_ops = &hns3_nic_netdev_ops;
3873	SET_NETDEV_DEV(netdev, &pdev->dev);
3874	hns3_ethtool_set_ops(netdev);
3875
3876	/* Carrier off reporting is important to ethtool even BEFORE open */
3877	netif_carrier_off(netdev);
3878
3879	ret = hns3_get_ring_config(priv);
3880	if (ret) {
3881		ret = -ENOMEM;
3882		goto out_get_ring_cfg;
3883	}
3884
3885	ret = hns3_nic_alloc_vector_data(priv);
3886	if (ret) {
3887		ret = -ENOMEM;
3888		goto out_alloc_vector_data;
3889	}
3890
3891	ret = hns3_nic_init_vector_data(priv);
3892	if (ret) {
3893		ret = -ENOMEM;
3894		goto out_init_vector_data;
3895	}
3896
3897	ret = hns3_init_all_ring(priv);
3898	if (ret) {
3899		ret = -ENOMEM;
3900		goto out_init_ring_data;
3901	}
3902
3903	ret = hns3_init_phy(netdev);
3904	if (ret)
3905		goto out_init_phy;
3906
3907	ret = register_netdev(netdev);
3908	if (ret) {
3909		dev_err(priv->dev, "failed to register netdev, ret = %d\n", ret);
3910		goto out_reg_netdev_fail;
3911	}
3912
3913	ret = hns3_client_start(handle);
3914	if (ret) {
3915		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
3916		goto out_client_start;
3917	}
3918
3919	hns3_dcbnl_setup(handle);
3920
3921	hns3_dbg_init(handle);
3922
3923	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
3924	netdev->max_mtu = HNS3_MAX_MTU;
3925
3926	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
3927
3928	if (netif_msg_drv(handle))
3929		hns3_info_show(priv);
3930
3931	return ret;
3932
3933out_client_start:
3934	unregister_netdev(netdev);
3935out_reg_netdev_fail:
3936	hns3_uninit_phy(netdev);
3937out_init_phy:
3938	hns3_uninit_all_ring(priv);
3939out_init_ring_data:
3940	hns3_nic_uninit_vector_data(priv);
3941out_init_vector_data:
3942	hns3_nic_dealloc_vector_data(priv);
3943out_alloc_vector_data:
3944	priv->ring_data = NULL;
3945out_get_ring_cfg:
3946	priv->ae_handle = NULL;
3947	free_netdev(netdev);
3948	return ret;
3949}
3950
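/* Client uninit: mirror of hns3_client_init(), tearing down the netdev,
 * rings, vectors and debugfs entries.
 */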
3951static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3952{
3953	struct net_device *netdev = handle->kinfo.netdev;
3954	struct hns3_nic_priv *priv = netdev_priv(netdev);
3955	int ret;
3956
3957	hns3_remove_hw_addr(netdev);
3958
3959	if (netdev->reg_state != NETREG_UNINITIALIZED)
3960		unregister_netdev(netdev);
3961
3962	hns3_client_stop(handle);
3963
3964	hns3_uninit_phy(netdev);
3965
3966	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
3967		netdev_warn(netdev, "already uninitialized\n");
3968		goto out_netdev_free;
3969	}
3970
3971	hns3_del_all_fd_rules(netdev, true);
3972
3973	hns3_clear_all_ring(handle, true);
3974
3975	hns3_nic_uninit_vector_data(priv);
3976
3977	ret = hns3_nic_dealloc_vector_data(priv);
3978	if (ret)
3979		netdev_err(netdev, "dealloc vector error\n");
3980
3981	ret = hns3_uninit_all_ring(priv);
3982	if (ret)
3983		netdev_err(netdev, "uninit ring error\n");
3984
3985	hns3_put_ring_config(priv);
3986
3987	hns3_dbg_uninit(handle);
3988
3989out_netdev_free:
3990	free_netdev(netdev);
3991}
3992
3993static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3994{
3995	struct net_device *netdev = handle->kinfo.netdev;
3996
3997	if (!netdev)
3998		return;
3999
4000	if (linkup) {
4001		netif_carrier_on(netdev);
4002		netif_tx_wake_all_queues(netdev);
4003		if (netif_msg_link(handle))
4004			netdev_info(netdev, "link up\n");
4005	} else {
4006		netif_carrier_off(netdev);
4007		netif_tx_stop_all_queues(netdev);
4008		if (netif_msg_link(handle))
4009			netdev_info(netdev, "link down\n");
4010	}
4011}
4012
4013static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
4014{
4015	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4016	struct net_device *ndev = kinfo->netdev;
4017
4018	if (tc > HNAE3_MAX_TC)
4019		return -EINVAL;
4020
4021	if (!ndev)
4022		return -ENODEV;
4023
4024	return hns3_nic_set_real_num_queue(ndev);
4025}
4026
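/* Re-sync the netdev's unicast and multicast address lists to the device,
 * e.g. after a reset has cleared the hardware MAC tables.
 */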
4027static int hns3_recover_hw_addr(struct net_device *ndev)
4028{
4029	struct netdev_hw_addr_list *list;
4030	struct netdev_hw_addr *ha, *tmp;
4031	int ret = 0;
4032
4033	netif_addr_lock_bh(ndev);
4034	/* go through and sync uc_addr entries to the device */
4035	list = &ndev->uc;
4036	list_for_each_entry_safe(ha, tmp, &list->list, list) {
4037		ret = hns3_nic_uc_sync(ndev, ha->addr);
4038		if (ret)
4039			goto out;
4040	}
4041
4042	/* go through and sync mc_addr entries to the device */
4043	list = &ndev->mc;
4044	list_for_each_entry_safe(ha, tmp, &list->list, list) {
4045		ret = hns3_nic_mc_sync(ndev, ha->addr);
4046		if (ret)
4047			goto out;
4048	}
4049
4050out:
4051	netif_addr_unlock_bh(ndev);
4052	return ret;
4053}
4054
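/* Unsync the device MAC address and the unicast/multicast entries, used when
 * the client is uninitialized or a function reset invalidates the tables.
 */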
4055static void hns3_remove_hw_addr(struct net_device *netdev)
4056{
4057	struct netdev_hw_addr_list *list;
4058	struct netdev_hw_addr *ha, *tmp;
4059
4060	hns3_nic_uc_unsync(netdev, netdev->dev_addr);
4061
4062	netif_addr_lock_bh(netdev);
4063	/* go through and unsync uc_addr entries from the device */
4064	list = &netdev->uc;
4065	list_for_each_entry_safe(ha, tmp, &list->list, list)
4066		hns3_nic_uc_unsync(netdev, ha->addr);
4067
4068	/* go through and unsync mc_addr entries from the device */
4069	list = &netdev->mc;
4070	list_for_each_entry_safe(ha, tmp, &list->list, list)
4071		if (ha->refcount > 1)
4072			hns3_nic_mc_unsync(netdev, ha->addr);
4073
4074	netif_addr_unlock_bh(netdev);
4075}
4076
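/* Release every pending tx buffer between next_to_clean and next_to_use. */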
4077static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
4078{
4079	while (ring->next_to_clean != ring->next_to_use) {
4080		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
4081		hns3_free_buffer_detach(ring, ring->next_to_clean);
4082		ring_ptr_move_fw(ring, next_to_clean);
4083	}
4084}
4085
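/* Walk the rx ring and make sure every descriptor owns a mapped buffer again,
 * replacing buffers that were handed to the stack, and drop any pending skb.
 */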
4086static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
4087{
4088	struct hns3_desc_cb res_cbs;
4089	int ret;
4090
4091	while (ring->next_to_use != ring->next_to_clean) {
4092		/* When a buffer is not reused, its memory has been
4093		 * freed in hns3_handle_rx_bd or will be freed by the
4094		 * stack, so we need to replace the buffer here.
4095		 */
4096		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4097			ret = hns3_reserve_buffer_map(ring, &res_cbs);
4098			if (ret) {
4099				u64_stats_update_begin(&ring->syncp);
4100				ring->stats.sw_err_cnt++;
4101				u64_stats_update_end(&ring->syncp);
4102				/* If allocating a new buffer fails, exit directly
4103				 * and clear the ring again in the up flow.
4104				 */
4105				netdev_warn(ring->tqp->handle->kinfo.netdev,
4106					    "reserve buffer map failed, ret = %d\n",
4107					    ret);
4108				return ret;
4109			}
4110			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
4111		}
4112		ring_ptr_move_fw(ring, next_to_use);
4113	}
4114
4115	/* Free the pending skb in rx ring */
4116	if (ring->skb) {
4117		dev_kfree_skb_any(ring->skb);
4118		ring->skb = NULL;
4119		ring->pending_buf = 0;
4120	}
4121
4122	return 0;
4123}
4124
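/* Like hns3_clear_rx_ring(), but only unmap consumed buffers instead of
 * allocating replacements; used on the forced teardown path.
 */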
4125static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4126{
4127	while (ring->next_to_use != ring->next_to_clean) {
4128		/* When a buffer is not reused, its memory has been
4129		 * freed in hns3_handle_rx_bd or will be freed by the
4130		 * stack, so we only need to unmap the buffer here.
4131		 */
4132		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4133			hns3_unmap_buffer(ring,
4134					  &ring->desc_cb[ring->next_to_use]);
4135			ring->desc_cb[ring->next_to_use].dma = 0;
4136		}
4137
4138		ring_ptr_move_fw(ring, next_to_use);
4139	}
4140}
4141
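/* Clear the tx and rx rings of every queue pair; "force" selects the
 * unmapping-only rx path used during teardown.
 */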
4142static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
4143{
4144	struct net_device *ndev = h->kinfo.netdev;
4145	struct hns3_nic_priv *priv = netdev_priv(ndev);
4146	u32 i;
4147
4148	for (i = 0; i < h->kinfo.num_tqps; i++) {
4149		struct hns3_enet_ring *ring;
4150
4151		ring = priv->ring_data[i].ring;
4152		hns3_clear_tx_ring(ring);
4153
4154		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4155		/* Continue to clear other rings even if clearing some
4156		 * rings failed.
4157		 */
4158		if (force)
4159			hns3_force_clear_rx_ring(ring);
4160		else
4161			hns3_clear_rx_ring(ring);
4162	}
4163}
4164
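/* Reset every queue in hardware and re-initialize the ring registers and
 * software pointers so the rings start from a clean state.
 */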
4165int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4166{
4167	struct net_device *ndev = h->kinfo.netdev;
4168	struct hns3_nic_priv *priv = netdev_priv(ndev);
4169	struct hns3_enet_ring *rx_ring;
4170	int i, j;
4171	int ret;
4172
4173	for (i = 0; i < h->kinfo.num_tqps; i++) {
4174		ret = h->ae_algo->ops->reset_queue(h, i);
4175		if (ret)
4176			return ret;
4177
4178		hns3_init_ring_hw(priv->ring_data[i].ring);
4179
4180		/* We need to clear the tx ring here because the self test
4181		 * will use the ring without running the down/up sequence.
4182		 */
4183		hns3_clear_tx_ring(priv->ring_data[i].ring);
4184		priv->ring_data[i].ring->next_to_clean = 0;
4185		priv->ring_data[i].ring->next_to_use = 0;
4186
4187		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4188		hns3_init_ring_hw(rx_ring);
4189		ret = hns3_clear_rx_ring(rx_ring);
4190		if (ret)
4191			return ret;
4192
4193		/* We cannot know the hardware head and tail when this
4194		 * function is called in the reset flow, so reuse all desc.
4195		 */
4196		for (j = 0; j < rx_ring->desc_num; j++)
4197			hns3_reuse_buffer(rx_ring, j);
4198
4199		rx_ring->next_to_clean = 0;
4200		rx_ring->next_to_use = 0;
4201	}
4202
4203	hns3_init_tx_ring_tc(priv);
4204
4205	return 0;
4206}
4207
4208static void hns3_store_coal(struct hns3_nic_priv *priv)
4209{
4210	/* ethtool only supports setting and querying one coalesce
4211	 * configuration for now, so save vector 0's coalesce
4212	 * configuration here in order to restore it later.
4213	 */
4214	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
4215	       sizeof(struct hns3_enet_coalesce));
4216	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
4217	       sizeof(struct hns3_enet_coalesce));
4218}
4219
4220static void hns3_restore_coal(struct hns3_nic_priv *priv)
4221{
4222	u16 vector_num = priv->vector_num;
4223	int i;
4224
4225	for (i = 0; i < vector_num; i++) {
4226		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
4227		       sizeof(struct hns3_enet_coalesce));
4228		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
4229		       sizeof(struct hns3_enet_coalesce));
4230	}
4231}
4232
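/* DOWN notification: stop the netdev and, for a function reset, remove the
 * MAC address and flow director entries in software (see comment below).
 */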
4233static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
4234{
4235	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4236	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4237	struct net_device *ndev = kinfo->netdev;
4238	struct hns3_nic_priv *priv = netdev_priv(ndev);
4239
4240	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
4241		return 0;
4242
4243	/* It is cumbersome for the hardware to pick and choose entries
4244	 * for deletion from the table space. Hence, for a function reset,
4245	 * software intervention is required to delete the entries.
4246	 */
4247	if (hns3_dev_ongoing_func_reset(ae_dev)) {
4248		hns3_remove_hw_addr(ndev);
4249		hns3_del_all_fd_rules(ndev, false);
4250	}
4251
4252	if (!netif_running(ndev))
4253		return 0;
4254
4255	return hns3_nic_net_stop(ndev);
4256}
4257
4258static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
4259{
4260	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4261	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
4262	int ret = 0;
4263
4264	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4265
4266	if (netif_running(kinfo->netdev)) {
4267		ret = hns3_nic_net_open(kinfo->netdev);
4268		if (ret) {
4269			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4270			netdev_err(kinfo->netdev,
4271				   "net up failed, ret = %d\n", ret);
4272			return ret;
4273		}
4274	}
4275
4276	return ret;
4277}
4278
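/* INIT notification: rebuild the ring configuration, vectors and rings,
 * restore the saved coalesce settings and restart the client after a reset.
 */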
4279static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
4280{
4281	struct net_device *netdev = handle->kinfo.netdev;
4282	struct hns3_nic_priv *priv = netdev_priv(netdev);
4283	int ret;
4284
4285	/* Carrier off reporting is important to ethtool even BEFORE open */
4286	netif_carrier_off(netdev);
4287
4288	ret = hns3_get_ring_config(priv);
4289	if (ret)
4290		return ret;
4291
4292	ret = hns3_nic_alloc_vector_data(priv);
4293	if (ret)
4294		goto err_put_ring;
4295
4296	hns3_restore_coal(priv);
4297
4298	ret = hns3_nic_init_vector_data(priv);
4299	if (ret)
4300		goto err_dealloc_vector;
4301
4302	ret = hns3_init_all_ring(priv);
4303	if (ret)
4304		goto err_uninit_vector;
4305
4306	ret = hns3_client_start(handle);
4307	if (ret) {
4308		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
4309		goto err_uninit_ring;
4310	}
4311
4312	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
4313
4314	return ret;
4315
4316err_uninit_ring:
4317	hns3_uninit_all_ring(priv);
4318err_uninit_vector:
4319	hns3_nic_uninit_vector_data(priv);
4320err_dealloc_vector:
4321	hns3_nic_dealloc_vector_data(priv);
4322err_put_ring:
4323	hns3_put_ring_config(priv);
4324
4325	return ret;
4326}
4327
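/* RESTORE notification: reprogram the MAC addresses, promiscuous mode, VLAN
 * filter state and flow director rules that a reset has cleared.
 */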
4328static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
4329{
4330	struct net_device *netdev = handle->kinfo.netdev;
4331	bool vlan_filter_enable;
4332	int ret;
4333
4334	ret = hns3_init_mac_addr(netdev, false);
4335	if (ret)
4336		return ret;
4337
4338	ret = hns3_recover_hw_addr(netdev);
4339	if (ret)
4340		return ret;
4341
4342	ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
4343	if (ret)
4344		return ret;
4345
4346	vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
4347	hns3_enable_vlan_filter(netdev, vlan_filter_enable);
4348
4349	if (handle->ae_algo->ops->restore_vlan_table)
4350		handle->ae_algo->ops->restore_vlan_table(handle);
4351
4352	return hns3_restore_fd_rules(netdev);
4353}
4354
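/* UNINIT notification: tear down the rings and vectors, saving the current
 * coalesce settings so a later INIT can restore them.
 */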
4355static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
4356{
4357	struct net_device *netdev = handle->kinfo.netdev;
4358	struct hns3_nic_priv *priv = netdev_priv(netdev);
4359	int ret;
4360
4361	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
4362		netdev_warn(netdev, "already uninitialized\n");
4363		return 0;
4364	}
4365
4366	hns3_clear_all_ring(handle, true);
4367	hns3_reset_tx_queue(priv->ae_handle);
4368
4369	hns3_nic_uninit_vector_data(priv);
4370
4371	hns3_store_coal(priv);
4372
4373	ret = hns3_nic_dealloc_vector_data(priv);
4374	if (ret)
4375		netdev_err(netdev, "dealloc vector error\n");
4376
4377	ret = hns3_uninit_all_ring(priv);
4378	if (ret)
4379		netdev_err(netdev, "uninit ring error\n");
4380
4381	hns3_put_ring_config(priv);
4382
4383	return ret;
4384}
4385
4386static int hns3_reset_notify(struct hnae3_handle *handle,
4387			     enum hnae3_reset_notify_type type)
4388{
4389	int ret = 0;
4390
4391	switch (type) {
4392	case HNAE3_UP_CLIENT:
4393		ret = hns3_reset_notify_up_enet(handle);
4394		break;
4395	case HNAE3_DOWN_CLIENT:
4396		ret = hns3_reset_notify_down_enet(handle);
4397		break;
4398	case HNAE3_INIT_CLIENT:
4399		ret = hns3_reset_notify_init_enet(handle);
4400		break;
4401	case HNAE3_UNINIT_CLIENT:
4402		ret = hns3_reset_notify_uninit_enet(handle);
4403		break;
4404	case HNAE3_RESTORE_CLIENT:
4405		ret = hns3_reset_notify_restore_enet(handle);
4406		break;
4407	default:
4408		break;
4409	}
4410
4411	return ret;
4412}
4413
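/* Ask the AE layer to switch to new_tqp_num queue pairs, then run the INIT
 * and UP notifications to bring the rings back up with the new layout.
 */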
4414static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
4415				bool rxfh_configured)
4416{
4417	int ret;
4418
4419	ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
4420						 rxfh_configured);
4421	if (ret) {
4422		dev_err(&handle->pdev->dev,
4423			"failed to change tqp num to %u\n", new_tqp_num);
4424		return ret;
4425	}
4426
4427	ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
4428	if (ret)
4429		return ret;
4430
4431	ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
4432	if (ret)
4433		hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);
4434
4435	return ret;
4436}
4437
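/* ethtool -L entry point, e.g. "ethtool -L <ifname> combined 8" (the
 * interface name here is only illustrative): validate the request, take the
 * interface through DOWN/UNINIT, switch the queue count and try to revert to
 * the old value if the change fails.
 */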
4438int hns3_set_channels(struct net_device *netdev,
4439		      struct ethtool_channels *ch)
4440{
4441	struct hnae3_handle *h = hns3_get_handle(netdev);
4442	struct hnae3_knic_private_info *kinfo = &h->kinfo;
4443	bool rxfh_configured = netif_is_rxfh_configured(netdev);
4444	u32 new_tqp_num = ch->combined_count;
4445	u16 org_tqp_num;
4446	int ret;
4447
4448	if (hns3_nic_resetting(netdev))
4449		return -EBUSY;
4450
4451	if (ch->rx_count || ch->tx_count)
4452		return -EINVAL;
4453
4454	if (new_tqp_num > hns3_get_max_available_channels(h) ||
4455	    new_tqp_num < 1) {
4456		dev_err(&netdev->dev,
4457			"failed to change tqps, the tqp range is from 1 to %d\n",
4458			hns3_get_max_available_channels(h));
4459		return -EINVAL;
4460	}
4461
4462	if (kinfo->rss_size == new_tqp_num)
4463		return 0;
4464
4465	netif_dbg(h, drv, netdev,
4466		  "set channels: tqp_num=%u, rxfh=%d\n",
4467		  new_tqp_num, rxfh_configured);
4468
4469	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
4470	if (ret)
4471		return ret;
4472
4473	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
4474	if (ret)
4475		return ret;
4476
4477	org_tqp_num = h->kinfo.num_tqps;
4478	ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
4479	if (ret) {
4480		int ret1;
4481
4482		netdev_warn(netdev,
4483			    "failed to change channels, reverting to the old value\n");
4484		ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
4485		if (ret1) {
4486			netdev_err(netdev,
4487				   "failed to revert to the old channel setting\n");
4488			return ret1;
4489		}
4490
4491		return ret;
4492	}
4493
4494	return 0;
4495}
4496
4497static const struct hns3_hw_error_info hns3_hw_err[] = {
4498	{ .type = HNAE3_PPU_POISON_ERROR,
4499	  .msg = "PPU poison" },
4500	{ .type = HNAE3_CMDQ_ECC_ERROR,
4501	  .msg = "IMP CMDQ error" },
4502	{ .type = HNAE3_IMP_RD_POISON_ERROR,
4503	  .msg = "IMP RD poison" },
4504};
4505
4506static void hns3_process_hw_error(struct hnae3_handle *handle,
4507				  enum hnae3_hw_error_type type)
4508{
4509	int i;
4510
4511	for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
4512		if (hns3_hw_err[i].type == type) {
4513			dev_err(&handle->pdev->dev, "Detected %s!\n",
4514				hns3_hw_err[i].msg);
4515			break;
4516		}
4517	}
4518}
4519
4520static const struct hnae3_client_ops client_ops = {
4521	.init_instance = hns3_client_init,
4522	.uninit_instance = hns3_client_uninit,
4523	.link_status_change = hns3_link_status_change,
4524	.setup_tc = hns3_client_setup_tc,
4525	.reset_notify = hns3_reset_notify,
4526	.process_hw_error = hns3_process_hw_error,
4527};
4528
4529/* hns3_init_module - Driver registration routine
4530 * hns3_init_module is the first routine called when the driver is
4531 * loaded. It registers the hnae3 client, debugfs and the PCI driver.
4532 */
4533static int __init hns3_init_module(void)
4534{
4535	int ret;
4536
4537	pr_info("%s: %s\n", hns3_driver_name, hns3_driver_string);
4538	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
4539
4540	client.type = HNAE3_CLIENT_KNIC;
4541	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
4542		 hns3_driver_name);
4543
4544	client.ops = &client_ops;
4545
4546	INIT_LIST_HEAD(&client.node);
4547
4548	hns3_dbg_register_debugfs(hns3_driver_name);
4549
4550	ret = hnae3_register_client(&client);
4551	if (ret)
4552		goto err_reg_client;
4553
4554	ret = pci_register_driver(&hns3_driver);
4555	if (ret)
4556		goto err_reg_driver;
4557
4558	return ret;
4559
4560err_reg_driver:
4561	hnae3_unregister_client(&client);
4562err_reg_client:
4563	hns3_dbg_unregister_debugfs();
4564	return ret;
4565}
4566module_init(hns3_init_module);
4567
4568/* hns3_exit_module - Driver exit cleanup routine
4569 * hns3_exit_module is called just before the driver is removed
4570 * from memory.
4571 */
4572static void __exit hns3_exit_module(void)
4573{
4574	pci_unregister_driver(&hns3_driver);
4575	hnae3_unregister_client(&client);
4576	hns3_dbg_unregister_debugfs();
4577}
4578module_exit(hns3_exit_module);
4579
4580MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
4581MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4582MODULE_LICENSE("GPL");
4583MODULE_ALIAS("pci:hns-nic");
4584MODULE_VERSION(HNS3_MOD_VERSION);