v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Huawei HiNIC PCI Express Linux driver
   4 * Copyright(c) 2017 Huawei Technologies Co., Ltd
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/moduleparam.h>
  10#include <linux/pci.h>
  11#include <linux/device.h>
  12#include <linux/errno.h>
  13#include <linux/types.h>
  14#include <linux/etherdevice.h>
  15#include <linux/netdevice.h>
  16#include <linux/slab.h>
  17#include <linux/if_vlan.h>
  18#include <linux/semaphore.h>
  19#include <linux/workqueue.h>
  20#include <net/ip.h>
  21#include <net/devlink.h>
  22#include <linux/bitops.h>
  23#include <linux/bitmap.h>
  24#include <linux/delay.h>
  25#include <linux/err.h>
  26
  27#include "hinic_debugfs.h"
  28#include "hinic_hw_qp.h"
  29#include "hinic_hw_dev.h"
  30#include "hinic_devlink.h"
  31#include "hinic_port.h"
  32#include "hinic_tx.h"
  33#include "hinic_rx.h"
  34#include "hinic_dev.h"
  35#include "hinic_sriov.h"
  36
  37MODULE_AUTHOR("Huawei Technologies CO., Ltd");
  38MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
  39MODULE_LICENSE("GPL");
  40
  41static unsigned int tx_weight = 64;
  42module_param(tx_weight, uint, 0644);
   43MODULE_PARM_DESC(tx_weight, "Number of Tx packets for NAPI budget (default=64)");
  44
  45static unsigned int rx_weight = 64;
  46module_param(rx_weight, uint, 0644);
   47MODULE_PARM_DESC(rx_weight, "Number of Rx packets for NAPI budget (default=64)");
  48
  49#define HINIC_DEV_ID_QUAD_PORT_25GE         0x1822
  50#define HINIC_DEV_ID_DUAL_PORT_100GE        0x0200
  51#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ   0x0205
  52#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ    0x0210
  53#define HINIC_DEV_ID_VF    0x375e
  54
  55#define HINIC_WQ_NAME                   "hinic_dev"
  56
  57#define MSG_ENABLE_DEFAULT              (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
  58					 NETIF_MSG_IFUP |                  \
  59					 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
  60
  61#define HINIC_LRO_MAX_WQE_NUM_DEFAULT	8
  62
  63#define HINIC_LRO_RX_TIMER_DEFAULT	16
  64
  65#define work_to_rx_mode_work(work)      \
  66		container_of(work, struct hinic_rx_mode_work, work)
  67
  68#define rx_mode_work_to_nic_dev(rx_mode_work) \
  69		container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
  70
  71#define HINIC_WAIT_SRIOV_CFG_TIMEOUT	15000
  72
  73#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT		2
  74#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG	32
  75#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG		7
  76
  77static int change_mac_addr(struct net_device *netdev, const u8 *addr);
  78
  79static int set_features(struct hinic_dev *nic_dev,
  80			netdev_features_t pre_features,
  81			netdev_features_t features, bool force_change);
  82
  83static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
  84{
  85	struct hinic_rxq_stats rx_stats;
  86
  87	hinic_rxq_get_stats(rxq, &rx_stats);
  88
  89	nic_rx_stats->bytes += rx_stats.bytes;
  90	nic_rx_stats->pkts  += rx_stats.pkts;
  91	nic_rx_stats->errors += rx_stats.errors;
  92	nic_rx_stats->csum_errors += rx_stats.csum_errors;
  93	nic_rx_stats->other_errors += rx_stats.other_errors;
  94}
  95
  96static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
  97{
  98	struct hinic_txq_stats tx_stats;
  99
 100	hinic_txq_get_stats(txq, &tx_stats);
 101
 102	nic_tx_stats->bytes += tx_stats.bytes;
 103	nic_tx_stats->pkts += tx_stats.pkts;
 104	nic_tx_stats->tx_busy += tx_stats.tx_busy;
 105	nic_tx_stats->tx_wake += tx_stats.tx_wake;
 106	nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
 107	nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
 108}
 109
 110static void gather_nic_stats(struct hinic_dev *nic_dev,
 111			     struct hinic_rxq_stats *nic_rx_stats,
 112			     struct hinic_txq_stats *nic_tx_stats)
 113{
 114	int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
 115
 116	for (i = 0; i < num_qps; i++)
 117		gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
 118
 119	for (i = 0; i < num_qps; i++)
 120		gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
 121}
 122
 123/**
 124 * create_txqs - Create the Logical Tx Queues of specific NIC device
 125 * @nic_dev: the specific NIC device
 126 *
 127 * Return 0 - Success, negative - Failure
 128 **/
 129static int create_txqs(struct hinic_dev *nic_dev)
 130{
 131	int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 132	struct net_device *netdev = nic_dev->netdev;
 133
 134	if (nic_dev->txqs)
 135		return -EINVAL;
 136
 137	nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
 138				     sizeof(*nic_dev->txqs), GFP_KERNEL);
 139	if (!nic_dev->txqs)
 140		return -ENOMEM;
 141
 142	hinic_sq_dbgfs_init(nic_dev);
 143
 144	for (i = 0; i < num_txqs; i++) {
 145		struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
 146
 147		err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
 148		if (err) {
 149			netif_err(nic_dev, drv, netdev,
 150				  "Failed to init Txq\n");
 151			goto err_init_txq;
 152		}
 153
 154		err = hinic_sq_debug_add(nic_dev, i);
 155		if (err) {
 156			netif_err(nic_dev, drv, netdev,
 157				  "Failed to add SQ%d debug\n", i);
 158			goto err_add_sq_dbg;
 159		}
 160	}
 161
 162	return 0;
 163
 164err_add_sq_dbg:
 165	hinic_clean_txq(&nic_dev->txqs[i]);
 166err_init_txq:
 167	for (j = 0; j < i; j++) {
 168		hinic_sq_debug_rem(nic_dev->txqs[j].sq);
 169		hinic_clean_txq(&nic_dev->txqs[j]);
 170	}
 171
 172	hinic_sq_dbgfs_uninit(nic_dev);
 173
 174	devm_kfree(&netdev->dev, nic_dev->txqs);
 175	nic_dev->txqs = NULL;
 176	return err;
 177}
 178
 179static void enable_txqs_napi(struct hinic_dev *nic_dev)
 180{
 181	int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 182	int i;
 183
 184	for (i = 0; i < num_txqs; i++)
 185		napi_enable(&nic_dev->txqs[i].napi);
 186}
 187
 188static void disable_txqs_napi(struct hinic_dev *nic_dev)
 189{
 190	int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 191	int i;
 192
 193	for (i = 0; i < num_txqs; i++)
 194		napi_disable(&nic_dev->txqs[i].napi);
 195}
 196
 197/**
 198 * free_txqs - Free the Logical Tx Queues of specific NIC device
 199 * @nic_dev: the specific NIC device
 200 **/
 201static void free_txqs(struct hinic_dev *nic_dev)
 202{
 203	int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 204	struct net_device *netdev = nic_dev->netdev;
 205
 206	if (!nic_dev->txqs)
 207		return;
 208
 209	for (i = 0; i < num_txqs; i++) {
 210		hinic_sq_debug_rem(nic_dev->txqs[i].sq);
 211		hinic_clean_txq(&nic_dev->txqs[i]);
 212	}
 213
 214	hinic_sq_dbgfs_uninit(nic_dev);
 215
 216	devm_kfree(&netdev->dev, nic_dev->txqs);
 217	nic_dev->txqs = NULL;
 218}
 219
 220/**
 221 * create_rxqs - Create the Logical Rx Queues of specific NIC device
 222 * @nic_dev: the specific NIC device
 223 *
 224 * Return 0 - Success, negative - Failure
 225 **/
 226static int create_rxqs(struct hinic_dev *nic_dev)
 227{
 228	int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 229	struct net_device *netdev = nic_dev->netdev;
 230
 231	if (nic_dev->rxqs)
 232		return -EINVAL;
 233
 234	nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
 235				     sizeof(*nic_dev->rxqs), GFP_KERNEL);
 236	if (!nic_dev->rxqs)
 237		return -ENOMEM;
 238
 239	hinic_rq_dbgfs_init(nic_dev);
 240
 241	for (i = 0; i < num_rxqs; i++) {
 242		struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
 243
 244		err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
 245		if (err) {
 246			netif_err(nic_dev, drv, netdev,
 247				  "Failed to init rxq\n");
 248			goto err_init_rxq;
 249		}
 250
 251		err = hinic_rq_debug_add(nic_dev, i);
 252		if (err) {
 253			netif_err(nic_dev, drv, netdev,
 254				  "Failed to add RQ%d debug\n", i);
 255			goto err_add_rq_dbg;
 256		}
 257	}
 258
 259	return 0;
 260
 261err_add_rq_dbg:
 262	hinic_clean_rxq(&nic_dev->rxqs[i]);
 263err_init_rxq:
 264	for (j = 0; j < i; j++) {
 265		hinic_rq_debug_rem(nic_dev->rxqs[j].rq);
 266		hinic_clean_rxq(&nic_dev->rxqs[j]);
 267	}
 268
 269	hinic_rq_dbgfs_uninit(nic_dev);
 270
 271	devm_kfree(&netdev->dev, nic_dev->rxqs);
 272	nic_dev->rxqs = NULL;
 273	return err;
 274}
 275
 276/**
 277 * free_rxqs - Free the Logical Rx Queues of specific NIC device
 278 * @nic_dev: the specific NIC device
 279 **/
 280static void free_rxqs(struct hinic_dev *nic_dev)
 281{
 282	int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 283	struct net_device *netdev = nic_dev->netdev;
 284
 285	if (!nic_dev->rxqs)
 286		return;
 287
 288	for (i = 0; i < num_rxqs; i++) {
 289		hinic_rq_debug_rem(nic_dev->rxqs[i].rq);
 290		hinic_clean_rxq(&nic_dev->rxqs[i]);
 291	}
 292
 293	hinic_rq_dbgfs_uninit(nic_dev);
 294
 295	devm_kfree(&netdev->dev, nic_dev->rxqs);
 296	nic_dev->rxqs = NULL;
 297}
 298
 299static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
 300{
 301	return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
 302}
 303
 304static int hinic_rss_init(struct hinic_dev *nic_dev)
 305{
 306	u8 default_rss_key[HINIC_RSS_KEY_SIZE];
 307	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
 308	u32 *indir_tbl;
 309	int err, i;
 310
 311	indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
 312	if (!indir_tbl)
 313		return -ENOMEM;
 314
 315	netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
 316	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
 317		indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);
 318
 319	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
 320	if (err)
 321		goto out;
 322
 323	err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
 324	if (err)
 325		goto out;
 326
 327	err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
 328	if (err)
 329		goto out;
 330
 331	err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
 332					nic_dev->rss_hash_engine);
 333	if (err)
 334		goto out;
 335
 336	err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
 337	if (err)
 338		goto out;
 339
 340out:
 341	kfree(indir_tbl);
 342	return err;
 343}
 344
 345static void hinic_rss_deinit(struct hinic_dev *nic_dev)
 346{
 347	hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
 348}
 349
 350static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
 351{
 352	nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
 353	nic_dev->rss_type.tcp_ipv6_ext = 1;
 354	nic_dev->rss_type.ipv6_ext = 1;
 355	nic_dev->rss_type.tcp_ipv6 = 1;
 356	nic_dev->rss_type.ipv6 = 1;
 357	nic_dev->rss_type.tcp_ipv4 = 1;
 358	nic_dev->rss_type.ipv4 = 1;
 359	nic_dev->rss_type.udp_ipv6 = 1;
 360	nic_dev->rss_type.udp_ipv4 = 1;
 361}
 362
 363static void hinic_enable_rss(struct hinic_dev *nic_dev)
 364{
 365	struct net_device *netdev = nic_dev->netdev;
 366	struct hinic_hwdev *hwdev = nic_dev->hwdev;
 367	struct hinic_hwif *hwif = hwdev->hwif;
 368	struct pci_dev *pdev = hwif->pdev;
 369	int i, node, err = 0;
 370	u16 num_cpus = 0;
 371
 372	if (nic_dev->max_qps <= 1) {
 373		nic_dev->flags &= ~HINIC_RSS_ENABLE;
 374		nic_dev->rss_limit = nic_dev->max_qps;
 375		nic_dev->num_qps = nic_dev->max_qps;
 376		nic_dev->num_rss = nic_dev->max_qps;
 377
 378		return;
 379	}
 380
 381	err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
 382	if (err) {
 383		netif_err(nic_dev, drv, netdev,
 384			  "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
 385		nic_dev->flags &= ~HINIC_RSS_ENABLE;
 386		nic_dev->max_qps = 1;
 387		nic_dev->rss_limit = nic_dev->max_qps;
 388		nic_dev->num_qps = nic_dev->max_qps;
 389		nic_dev->num_rss = nic_dev->max_qps;
 390
 391		return;
 392	}
 393
 394	nic_dev->flags |= HINIC_RSS_ENABLE;
 395
 396	for (i = 0; i < num_online_cpus(); i++) {
 397		node = cpu_to_node(i);
 398		if (node == dev_to_node(&pdev->dev))
 399			num_cpus++;
 400	}
 401
 402	if (!num_cpus)
 403		num_cpus = num_online_cpus();
 404
 405	nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
 406	nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
 407
 408	nic_dev->rss_limit = nic_dev->num_qps;
 409	nic_dev->num_rss = nic_dev->num_qps;
 410
 411	hinic_init_rss_parameters(nic_dev);
 412	err = hinic_rss_init(nic_dev);
 413	if (err)
 414		netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
 415}
 416
 417int hinic_open(struct net_device *netdev)
 418{
 419	struct hinic_dev *nic_dev = netdev_priv(netdev);
 420	enum hinic_port_link_state link_state;
 421	int err, ret;
 422
 423	if (!(nic_dev->flags & HINIC_INTF_UP)) {
 424		err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
 425				       nic_dev->rq_depth);
 426		if (err) {
 427			netif_err(nic_dev, drv, netdev,
 428				  "Failed - HW interface up\n");
 429			return err;
 430		}
 431	}
 432
 433	err = create_txqs(nic_dev);
 434	if (err) {
 435		netif_err(nic_dev, drv, netdev,
 436			  "Failed to create Tx queues\n");
 437		goto err_create_txqs;
 438	}
 439
 440	enable_txqs_napi(nic_dev);
 441
 442	err = create_rxqs(nic_dev);
 443	if (err) {
 444		netif_err(nic_dev, drv, netdev,
 445			  "Failed to create Rx queues\n");
 446		goto err_create_rxqs;
 447	}
 448
 449	hinic_enable_rss(nic_dev);
 450
 451	err = hinic_configure_max_qnum(nic_dev);
 452	if (err) {
 453		netif_err(nic_dev, drv, nic_dev->netdev,
 454			  "Failed to configure the maximum number of queues\n");
 455		goto err_port_state;
 456	}
 457
 458	netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
 459	netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
 460
 461	err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
 462	if (err) {
 463		netif_err(nic_dev, drv, netdev,
 464			  "Failed to set port state\n");
 465		goto err_port_state;
 466	}
 467
 468	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
 469	if (err) {
 470		netif_err(nic_dev, drv, netdev,
 471			  "Failed to set func port state\n");
 472		goto err_func_port_state;
 473	}
 474
 475	down(&nic_dev->mgmt_lock);
 476
 477	err = hinic_port_link_state(nic_dev, &link_state);
 478	if (err) {
 479		netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
 480		goto err_port_link;
 481	}
 482
 483	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
 484		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
 485
 486	if (link_state == HINIC_LINK_STATE_UP) {
 487		nic_dev->flags |= HINIC_LINK_UP;
 488		nic_dev->cable_unplugged = false;
 489		nic_dev->module_unrecognized = false;
 490	}
 491
 492	nic_dev->flags |= HINIC_INTF_UP;
 493
 494	if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
 495	    (HINIC_LINK_UP | HINIC_INTF_UP)) {
 496		netif_info(nic_dev, drv, netdev, "link + intf UP\n");
 497		netif_carrier_on(netdev);
 498		netif_tx_wake_all_queues(netdev);
 499	}
 500
 501	up(&nic_dev->mgmt_lock);
 502
 503	netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
 504	return 0;
 505
 506err_port_link:
 507	up(&nic_dev->mgmt_lock);
 508	ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
 509	if (ret)
 510		netif_warn(nic_dev, drv, netdev,
 511			   "Failed to revert func port state\n");
 512
 513err_func_port_state:
 514	ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
 515	if (ret)
 516		netif_warn(nic_dev, drv, netdev,
 517			   "Failed to revert port state\n");
 518err_port_state:
 519	free_rxqs(nic_dev);
 520	if (nic_dev->flags & HINIC_RSS_ENABLE) {
 521		hinic_rss_deinit(nic_dev);
 522		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
 523	}
 524
 525err_create_rxqs:
 526	disable_txqs_napi(nic_dev);
 527	free_txqs(nic_dev);
 528
 529err_create_txqs:
 530	if (!(nic_dev->flags & HINIC_INTF_UP))
 531		hinic_hwdev_ifdown(nic_dev->hwdev);
 532	return err;
 533}
 534
 535int hinic_close(struct net_device *netdev)
 536{
 537	struct hinic_dev *nic_dev = netdev_priv(netdev);
 538	unsigned int flags;
 539
  540	/* Disable txq napi first to avoid rewaking txq in free_tx_poll */
 541	disable_txqs_napi(nic_dev);
 542
 543	down(&nic_dev->mgmt_lock);
 544
 545	flags = nic_dev->flags;
 546	nic_dev->flags &= ~HINIC_INTF_UP;
 547
 548	netif_carrier_off(netdev);
 549	netif_tx_disable(netdev);
 550
 551	up(&nic_dev->mgmt_lock);
 552
 553	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
 554		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
 555
 556	hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
 557
 558	hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
 559
 560	if (nic_dev->flags & HINIC_RSS_ENABLE) {
 561		hinic_rss_deinit(nic_dev);
 562		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
 563	}
 564
 565	free_rxqs(nic_dev);
 566	free_txqs(nic_dev);
 567
 568	if (flags & HINIC_INTF_UP)
 569		hinic_hwdev_ifdown(nic_dev->hwdev);
 570
 571	netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
 572	return 0;
 573}
 574
 575static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
 576{
 577	struct hinic_dev *nic_dev = netdev_priv(netdev);
 578	int err;
 579
 580	netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);
 581
 582	err = hinic_port_set_mtu(nic_dev, new_mtu);
 583	if (err)
 584		netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
 585	else
 586		WRITE_ONCE(netdev->mtu, new_mtu);
 587
 588	return err;
 589}
 590
 591/**
 592 * change_mac_addr - change the main mac address of network device
 593 * @netdev: network device
 594 * @addr: mac address to set
 595 *
 596 * Return 0 - Success, negative - Failure
 597 **/
 598static int change_mac_addr(struct net_device *netdev, const u8 *addr)
 599{
 600	struct hinic_dev *nic_dev = netdev_priv(netdev);
 601	u16 vid = 0;
 602	int err;
 603
 604	if (!is_valid_ether_addr(addr))
 605		return -EADDRNOTAVAIL;
 606
 607	netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n",
 608		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
 609
 610	down(&nic_dev->mgmt_lock);
 611
 612	do {
 613		err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
 614		if (err) {
 615			netif_err(nic_dev, drv, netdev,
 616				  "Failed to delete mac\n");
 617			break;
 618		}
 619
 620		err = hinic_port_add_mac(nic_dev, addr, vid);
 621		if (err) {
 622			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
 623			break;
 624		}
 625
 626		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
 627	} while (vid != VLAN_N_VID);
 628
 629	up(&nic_dev->mgmt_lock);
 630	return err;
 631}
 632
 633static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
 634{
 635	unsigned char new_mac[ETH_ALEN];
 636	struct sockaddr *saddr = addr;
 637	int err;
 638
 639	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
 640
 641	err = change_mac_addr(netdev, new_mac);
 642	if (!err)
 643		eth_hw_addr_set(netdev, new_mac);
 644
 645	return err;
 646}
 647
 648/**
 649 * add_mac_addr - add mac address to network device
 650 * @netdev: network device
 651 * @addr: mac address to add
 652 *
 653 * Return 0 - Success, negative - Failure
 654 **/
 655static int add_mac_addr(struct net_device *netdev, const u8 *addr)
 656{
 657	struct hinic_dev *nic_dev = netdev_priv(netdev);
 658	u16 vid = 0;
 659	int err;
 660
 661	netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
 662		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
 663
 664	down(&nic_dev->mgmt_lock);
 665
 666	do {
 667		err = hinic_port_add_mac(nic_dev, addr, vid);
 668		if (err) {
 669			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
 670			break;
 671		}
 672
 673		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
 674	} while (vid != VLAN_N_VID);
 675
 676	up(&nic_dev->mgmt_lock);
 677	return err;
 678}
 679
 680/**
 681 * remove_mac_addr - remove mac address from network device
 682 * @netdev: network device
 683 * @addr: mac address to remove
 684 *
 685 * Return 0 - Success, negative - Failure
 686 **/
 687static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
 688{
 689	struct hinic_dev *nic_dev = netdev_priv(netdev);
 690	u16 vid = 0;
 691	int err;
 692
 693	if (!is_valid_ether_addr(addr))
 694		return -EADDRNOTAVAIL;
 695
 696	netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n",
 697		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
 698
 699	down(&nic_dev->mgmt_lock);
 700
 701	do {
 702		err = hinic_port_del_mac(nic_dev, addr, vid);
 703		if (err) {
 704			netif_err(nic_dev, drv, netdev,
 705				  "Failed to delete mac\n");
 706			break;
 707		}
 708
 709		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
 710	} while (vid != VLAN_N_VID);
 711
 712	up(&nic_dev->mgmt_lock);
 713	return err;
 714}
 715
 716static int hinic_vlan_rx_add_vid(struct net_device *netdev,
 717				 __always_unused __be16 proto, u16 vid)
 718{
 719	struct hinic_dev *nic_dev = netdev_priv(netdev);
 720	int ret, err;
 721
 722	netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);
 723
 724	down(&nic_dev->mgmt_lock);
 725
 726	err = hinic_port_add_vlan(nic_dev, vid);
 727	if (err) {
 728		netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
 729		goto err_vlan_add;
 730	}
 731
 732	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
 733	if (err && err != HINIC_PF_SET_VF_ALREADY) {
 734		netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
 735		goto err_add_mac;
 736	}
 737
 738	bitmap_set(nic_dev->vlan_bitmap, vid, 1);
 739
 740	up(&nic_dev->mgmt_lock);
 741	return 0;
 742
 743err_add_mac:
 744	ret = hinic_port_del_vlan(nic_dev, vid);
 745	if (ret)
 746		netif_err(nic_dev, drv, netdev,
 747			  "Failed to revert by removing vlan\n");
 748
 749err_vlan_add:
 750	up(&nic_dev->mgmt_lock);
 751	return err;
 752}
 753
 754static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
 755				  __always_unused __be16 proto, u16 vid)
 756{
 757	struct hinic_dev *nic_dev = netdev_priv(netdev);
 758	int err;
 759
 760	netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);
 761
 762	down(&nic_dev->mgmt_lock);
 763
 764	err = hinic_port_del_vlan(nic_dev, vid);
 765	if (err) {
 766		netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
 767		goto err_del_vlan;
 768	}
 769
 770	bitmap_clear(nic_dev->vlan_bitmap, vid, 1);
 771
 772	up(&nic_dev->mgmt_lock);
 773	return 0;
 774
 775err_del_vlan:
 776	up(&nic_dev->mgmt_lock);
 777	return err;
 778}
 779
 780static void set_rx_mode(struct work_struct *work)
 781{
 782	struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
 783	struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
 784
 785	hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
 786
 787	__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
 788	__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
 789}
 790
 791static void hinic_set_rx_mode(struct net_device *netdev)
 792{
 793	struct hinic_dev *nic_dev = netdev_priv(netdev);
 794	struct hinic_rx_mode_work *rx_mode_work;
 795	u32 rx_mode;
 796
 797	rx_mode_work = &nic_dev->rx_mode_work;
 798
 799	rx_mode = HINIC_RX_MODE_UC |
 800		  HINIC_RX_MODE_MC |
 801		  HINIC_RX_MODE_BC;
 802
 803	if (netdev->flags & IFF_PROMISC) {
 804		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
 805			rx_mode |= HINIC_RX_MODE_PROMISC;
 806	} else if (netdev->flags & IFF_ALLMULTI) {
 807		rx_mode |= HINIC_RX_MODE_MC_ALL;
 808	}
 809
 810	rx_mode_work->rx_mode = rx_mode;
 811
 812	queue_work(nic_dev->workq, &rx_mode_work->work);
 813}
 814
 815static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 816{
 817	struct hinic_dev *nic_dev = netdev_priv(netdev);
 818	u16 sw_pi, hw_ci, sw_ci;
 819	struct hinic_sq *sq;
 820	u16 num_sqs, q_id;
 821
 822	num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 823
 824	netif_err(nic_dev, drv, netdev, "Tx timeout\n");
 825
 826	for (q_id = 0; q_id < num_sqs; q_id++) {
 827		if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
 828			continue;
 829
 830		sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
 831		sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
 832		hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
 833		sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
 834		netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
 835			  q_id, sw_pi, hw_ci, sw_ci,
 836			  nic_dev->txqs[q_id].napi.state);
 837	}
 838}
 839
 840static void hinic_get_stats64(struct net_device *netdev,
 841			      struct rtnl_link_stats64 *stats)
 842{
 843	struct hinic_dev *nic_dev = netdev_priv(netdev);
 844	struct hinic_rxq_stats nic_rx_stats = {};
 845	struct hinic_txq_stats nic_tx_stats = {};
 846
 847	if (nic_dev->flags & HINIC_INTF_UP)
 848		gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
 849
 850	stats->rx_bytes   = nic_rx_stats.bytes;
 851	stats->rx_packets = nic_rx_stats.pkts;
 852	stats->rx_errors  = nic_rx_stats.errors;
 853
 854	stats->tx_bytes   = nic_tx_stats.bytes;
 855	stats->tx_packets = nic_tx_stats.pkts;
 856	stats->tx_errors  = nic_tx_stats.tx_dropped;
 857}
 858
 859static int hinic_set_features(struct net_device *netdev,
 860			      netdev_features_t features)
 861{
 862	struct hinic_dev *nic_dev = netdev_priv(netdev);
 863
 864	return set_features(nic_dev, nic_dev->netdev->features,
 865			    features, false);
 866}
 867
 868static netdev_features_t hinic_fix_features(struct net_device *netdev,
 869					    netdev_features_t features)
 870{
 871	struct hinic_dev *nic_dev = netdev_priv(netdev);
 872
 873	/* If Rx checksum is disabled, then LRO should also be disabled */
 874	if (!(features & NETIF_F_RXCSUM)) {
 875		netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
 876		features &= ~NETIF_F_LRO;
 877	}
 878
 879	return features;
 880}
 881
 882static const struct net_device_ops hinic_netdev_ops = {
 883	.ndo_open = hinic_open,
 884	.ndo_stop = hinic_close,
 885	.ndo_change_mtu = hinic_change_mtu,
 886	.ndo_set_mac_address = hinic_set_mac_addr,
 887	.ndo_validate_addr = eth_validate_addr,
 888	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
 889	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
 890	.ndo_set_rx_mode = hinic_set_rx_mode,
 891	.ndo_start_xmit = hinic_xmit_frame,
 892	.ndo_tx_timeout = hinic_tx_timeout,
 893	.ndo_get_stats64 = hinic_get_stats64,
 894	.ndo_fix_features = hinic_fix_features,
 895	.ndo_set_features = hinic_set_features,
 896	.ndo_set_vf_mac	= hinic_ndo_set_vf_mac,
 897	.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
 898	.ndo_get_vf_config = hinic_ndo_get_vf_config,
 899	.ndo_set_vf_trust = hinic_ndo_set_vf_trust,
 900	.ndo_set_vf_rate = hinic_ndo_set_vf_bw,
 901	.ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
 902	.ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
 903};
 904
 905static const struct net_device_ops hinicvf_netdev_ops = {
 906	.ndo_open = hinic_open,
 907	.ndo_stop = hinic_close,
 908	.ndo_change_mtu = hinic_change_mtu,
 909	.ndo_set_mac_address = hinic_set_mac_addr,
 910	.ndo_validate_addr = eth_validate_addr,
 911	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
 912	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
 913	.ndo_set_rx_mode = hinic_set_rx_mode,
 914	.ndo_start_xmit = hinic_xmit_frame,
 915	.ndo_tx_timeout = hinic_tx_timeout,
 916	.ndo_get_stats64 = hinic_get_stats64,
 917	.ndo_fix_features = hinic_fix_features,
 918	.ndo_set_features = hinic_set_features,
 919};
 920
 921static void netdev_features_init(struct net_device *netdev)
 922{
 923	netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
 924			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
 925			      NETIF_F_RXCSUM | NETIF_F_LRO |
 926			      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 927			      NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
 928
 929	netdev->vlan_features = netdev->hw_features;
 930
 931	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
 932
 933	netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SCTP_CRC |
 934				  NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
 935				  NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_TUNNEL;
 936}
 937
 938static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
 939{
 940	struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
 941	struct hinic_pause_config pause_info = {0};
 942	struct hinic_port_cap port_cap = {0};
 943
 944	if (hinic_port_get_cap(nic_dev, &port_cap))
 945		return;
 946
 947	mutex_lock(&nic_cfg->cfg_mutex);
 948	if (nic_cfg->pause_set || !port_cap.autoneg_state) {
 949		nic_cfg->auto_neg = port_cap.autoneg_state;
 950		pause_info.auto_neg = nic_cfg->auto_neg;
 951		pause_info.rx_pause = nic_cfg->rx_pause;
 952		pause_info.tx_pause = nic_cfg->tx_pause;
 953		hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
 954	}
 955	mutex_unlock(&nic_cfg->cfg_mutex);
 956}
 957
 958/**
 959 * link_status_event_handler - link event handler
 960 * @handle: nic device for the handler
 961 * @buf_in: input buffer
 962 * @in_size: input size
 963 * @buf_out: output buffer
 964 * @out_size: returned output size
 965 **/
 966static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
 967				      void *buf_out, u16 *out_size)
 968{
 969	struct hinic_port_link_status *link_status, *ret_link_status;
 970	struct hinic_dev *nic_dev = handle;
 971
 972	link_status = buf_in;
 973
 974	if (link_status->link == HINIC_LINK_STATE_UP) {
 975		down(&nic_dev->mgmt_lock);
 976
 977		nic_dev->flags |= HINIC_LINK_UP;
 978		nic_dev->cable_unplugged = false;
 979		nic_dev->module_unrecognized = false;
 980
 981		if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
 982		    (HINIC_LINK_UP | HINIC_INTF_UP)) {
 983			netif_carrier_on(nic_dev->netdev);
 984			netif_tx_wake_all_queues(nic_dev->netdev);
 985		}
 986
 987		up(&nic_dev->mgmt_lock);
 988
 989		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
 990			hinic_refresh_nic_cfg(nic_dev);
 991
 992		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
 993	} else {
 994		down(&nic_dev->mgmt_lock);
 995
 996		nic_dev->flags &= ~HINIC_LINK_UP;
 997
 998		netif_carrier_off(nic_dev->netdev);
 999		netif_tx_disable(nic_dev->netdev);
1000
1001		up(&nic_dev->mgmt_lock);
1002
1003		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
1004	}
1005
1006	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
1007		hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
1008						  link_status->link);
1009
1010	ret_link_status = buf_out;
1011	ret_link_status->status = 0;
1012
1013	*out_size = sizeof(*ret_link_status);
1014}
1015
1016static void cable_plug_event(void *handle,
1017			     void *buf_in, u16 in_size,
1018			     void *buf_out, u16 *out_size)
1019{
1020	struct hinic_cable_plug_event *plug_event = buf_in;
1021	struct hinic_dev *nic_dev = handle;
1022
1023	nic_dev->cable_unplugged = plug_event->plugged ? false : true;
1024
1025	*out_size = sizeof(*plug_event);
1026	plug_event = buf_out;
1027	plug_event->status = 0;
1028}
1029
1030static void link_err_event(void *handle,
1031			   void *buf_in, u16 in_size,
1032			   void *buf_out, u16 *out_size)
1033{
1034	struct hinic_link_err_event *link_err = buf_in;
1035	struct hinic_dev *nic_dev = handle;
1036
1037	if (link_err->err_type >= LINK_ERR_NUM)
1038		netif_info(nic_dev, link, nic_dev->netdev,
1039			   "Link failed, Unknown error type: 0x%x\n",
1040			   link_err->err_type);
1041	else
1042		nic_dev->module_unrecognized = true;
1043
1044	*out_size = sizeof(*link_err);
1045	link_err = buf_out;
1046	link_err->status = 0;
1047}
1048
1049static int set_features(struct hinic_dev *nic_dev,
1050			netdev_features_t pre_features,
1051			netdev_features_t features, bool force_change)
1052{
1053	netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
1054	u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
1055	netdev_features_t failed_features = 0;
1056	int ret = 0;
1057	int err = 0;
1058
1059	if (changed & NETIF_F_TSO) {
1060		ret = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
1061					 HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
1062		if (ret) {
1063			err = ret;
1064			failed_features |= NETIF_F_TSO;
1065		}
1066	}
1067
1068	if (changed & NETIF_F_RXCSUM) {
1069		ret = hinic_set_rx_csum_offload(nic_dev, csum_en);
1070		if (ret) {
1071			err = ret;
1072			failed_features |= NETIF_F_RXCSUM;
1073		}
1074	}
1075
1076	if (changed & NETIF_F_LRO) {
1077		ret = hinic_set_rx_lro_state(nic_dev,
1078					     !!(features & NETIF_F_LRO),
1079					     HINIC_LRO_RX_TIMER_DEFAULT,
1080					     HINIC_LRO_MAX_WQE_NUM_DEFAULT);
1081		if (ret) {
1082			err = ret;
1083			failed_features |= NETIF_F_LRO;
1084		}
1085	}
1086
1087	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1088		ret = hinic_set_rx_vlan_offload(nic_dev,
1089						!!(features &
1090						   NETIF_F_HW_VLAN_CTAG_RX));
1091		if (ret) {
1092			err = ret;
1093			failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
1094		}
1095	}
1096
1097	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
1098		ret = hinic_set_vlan_fliter(nic_dev,
1099					    !!(features &
1100					       NETIF_F_HW_VLAN_CTAG_FILTER));
1101		if (ret) {
1102			err = ret;
1103			failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1104		}
1105	}
1106
1107	if (err) {
1108		nic_dev->netdev->features = features ^ failed_features;
1109		return -EIO;
1110	}
1111
1112	return 0;
1113}
1114
1115static int hinic_init_intr_coalesce(struct hinic_dev *nic_dev)
1116{
1117	u64 size;
1118	u16 i;
1119
1120	size = sizeof(struct hinic_intr_coal_info) * nic_dev->max_qps;
1121	nic_dev->rx_intr_coalesce = kzalloc(size, GFP_KERNEL);
1122	if (!nic_dev->rx_intr_coalesce)
1123		return -ENOMEM;
1124	nic_dev->tx_intr_coalesce = kzalloc(size, GFP_KERNEL);
1125	if (!nic_dev->tx_intr_coalesce) {
1126		kfree(nic_dev->rx_intr_coalesce);
1127		return -ENOMEM;
1128	}
1129
1130	for (i = 0; i < nic_dev->max_qps; i++) {
1131		nic_dev->rx_intr_coalesce[i].pending_limt =
1132			HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
1133		nic_dev->rx_intr_coalesce[i].coalesce_timer_cfg =
1134			HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
1135		nic_dev->rx_intr_coalesce[i].resend_timer_cfg =
1136			HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
1137		nic_dev->tx_intr_coalesce[i].pending_limt =
1138			HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
1139		nic_dev->tx_intr_coalesce[i].coalesce_timer_cfg =
1140			HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
1141		nic_dev->tx_intr_coalesce[i].resend_timer_cfg =
1142			HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
1143	}
1144
1145	return 0;
1146}
1147
1148static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
1149{
1150	kfree(nic_dev->tx_intr_coalesce);
1151	kfree(nic_dev->rx_intr_coalesce);
1152}
1153
1154/**
1155 * nic_dev_init - Initialize the NIC device
1156 * @pdev: the NIC pci device
1157 *
1158 * Return 0 - Success, negative - Failure
1159 **/
1160static int nic_dev_init(struct pci_dev *pdev)
1161{
1162	struct hinic_rx_mode_work *rx_mode_work;
1163	struct hinic_dev *nic_dev;
1164	struct net_device *netdev;
1165	struct hinic_hwdev *hwdev;
1166	struct devlink *devlink;
1167	u8 addr[ETH_ALEN];
1168	int err, num_qps;
1169
1170	devlink = hinic_devlink_alloc(&pdev->dev);
1171	if (!devlink) {
1172		dev_err(&pdev->dev, "Hinic devlink alloc failed\n");
1173		return -ENOMEM;
1174	}
1175
1176	hwdev = hinic_init_hwdev(pdev, devlink);
1177	if (IS_ERR(hwdev)) {
1178		dev_err(&pdev->dev, "Failed to initialize HW device\n");
1179		hinic_devlink_free(devlink);
1180		return PTR_ERR(hwdev);
1181	}
1182
1183	num_qps = hinic_hwdev_num_qps(hwdev);
1184	if (num_qps <= 0) {
1185		dev_err(&pdev->dev, "Invalid number of QPS\n");
1186		err = -EINVAL;
1187		goto err_num_qps;
1188	}
1189
1190	netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
1191	if (!netdev) {
1192		dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
1193		err = -ENOMEM;
1194		goto err_alloc_etherdev;
1195	}
1196
1197	if (!HINIC_IS_VF(hwdev->hwif))
1198		netdev->netdev_ops = &hinic_netdev_ops;
1199	else
1200		netdev->netdev_ops = &hinicvf_netdev_ops;
1201
1202	netdev->max_mtu = HINIC_MAX_MTU_SIZE;
1203	netdev->min_mtu = HINIC_MIN_MTU_SIZE;
1204
1205	nic_dev = netdev_priv(netdev);
1206	nic_dev->netdev = netdev;
1207	nic_dev->hwdev  = hwdev;
1208	nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
1209	nic_dev->flags = 0;
1210	nic_dev->txqs = NULL;
1211	nic_dev->rxqs = NULL;
1212	nic_dev->tx_weight = tx_weight;
1213	nic_dev->rx_weight = rx_weight;
1214	nic_dev->sq_depth = HINIC_SQ_DEPTH;
1215	nic_dev->rq_depth = HINIC_RQ_DEPTH;
1216	nic_dev->sriov_info.hwdev = hwdev;
1217	nic_dev->sriov_info.pdev = pdev;
1218	nic_dev->max_qps = num_qps;
1219	nic_dev->devlink = devlink;
1220
1221	hinic_set_ethtool_ops(netdev);
1222
1223	sema_init(&nic_dev->mgmt_lock, 1);
1224
1225	nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
1226						  GFP_KERNEL);
1227	if (!nic_dev->vlan_bitmap) {
1228		err = -ENOMEM;
1229		goto err_vlan_bitmap;
1230	}
1231
1232	nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
1233	if (!nic_dev->workq) {
1234		err = -ENOMEM;
1235		goto err_workq;
1236	}
1237
1238	pci_set_drvdata(pdev, netdev);
1239
1240	err = hinic_port_get_mac(nic_dev, addr);
1241	if (err) {
1242		dev_err(&pdev->dev, "Failed to get mac address\n");
1243		goto err_get_mac;
1244	}
1245	eth_hw_addr_set(netdev, addr);
1246
1247	if (!is_valid_ether_addr(netdev->dev_addr)) {
1248		if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
1249			dev_err(&pdev->dev, "Invalid MAC address\n");
1250			err = -EIO;
1251			goto err_add_mac;
1252		}
1253
1254		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
1255			 netdev->dev_addr);
1256		eth_hw_addr_random(netdev);
1257	}
1258
1259	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
1260	if (err && err != HINIC_PF_SET_VF_ALREADY) {
1261		dev_err(&pdev->dev, "Failed to add mac\n");
1262		goto err_add_mac;
1263	}
1264
1265	err = hinic_port_set_mtu(nic_dev, netdev->mtu);
1266	if (err) {
1267		dev_err(&pdev->dev, "Failed to set mtu\n");
1268		goto err_set_mtu;
1269	}
1270
1271	rx_mode_work = &nic_dev->rx_mode_work;
1272	INIT_WORK(&rx_mode_work->work, set_rx_mode);
1273
1274	netdev_features_init(netdev);
1275
1276	netif_carrier_off(netdev);
1277
1278	hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
1279				nic_dev, link_status_event_handler);
1280	hinic_hwdev_cb_register(nic_dev->hwdev,
1281				HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT,
1282				nic_dev, cable_plug_event);
1283	hinic_hwdev_cb_register(nic_dev->hwdev,
1284				HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT,
1285				nic_dev, link_err_event);
1286
1287	err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
1288	if (err)
1289		goto err_set_features;
1290
1291	/* enable pause and disable pfc by default */
1292	err = hinic_dcb_set_pfc(nic_dev->hwdev, 0, 0);
1293	if (err)
1294		goto err_set_pfc;
1295
1296	SET_NETDEV_DEV(netdev, &pdev->dev);
1297
1298	err = hinic_init_intr_coalesce(nic_dev);
1299	if (err) {
1300		dev_err(&pdev->dev, "Failed to init_intr_coalesce\n");
1301		goto err_init_intr;
1302	}
1303
1304	hinic_dbg_init(nic_dev);
1305
1306	hinic_func_tbl_dbgfs_init(nic_dev);
1307
1308	err = hinic_func_table_debug_add(nic_dev);
1309	if (err) {
1310		dev_err(&pdev->dev, "Failed to add func_table debug\n");
1311		goto err_add_func_table_dbg;
1312	}
1313
1314	err = register_netdev(netdev);
1315	if (err) {
1316		dev_err(&pdev->dev, "Failed to register netdev\n");
1317		goto err_reg_netdev;
1318	}
1319
1320	return 0;
1321
1322err_reg_netdev:
1323	hinic_func_table_debug_rem(nic_dev);
1324err_add_func_table_dbg:
1325	hinic_func_tbl_dbgfs_uninit(nic_dev);
1326	hinic_dbg_uninit(nic_dev);
1327	hinic_free_intr_coalesce(nic_dev);
1328err_init_intr:
1329err_set_pfc:
1330err_set_features:
1331	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1332				  HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
1333	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1334				  HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
1335	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1336				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
1337	cancel_work_sync(&rx_mode_work->work);
1338
1339err_set_mtu:
1340	hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
1341err_add_mac:
1342err_get_mac:
1343	pci_set_drvdata(pdev, NULL);
1344	destroy_workqueue(nic_dev->workq);
1345err_workq:
1346err_vlan_bitmap:
1347	free_netdev(netdev);
1348
1349err_alloc_etherdev:
1350err_num_qps:
1351	hinic_free_hwdev(hwdev);
1352	hinic_devlink_free(devlink);
1353	return err;
1354}
1355
1356static int hinic_probe(struct pci_dev *pdev,
1357		       const struct pci_device_id *id)
1358{
1359	int err = pci_enable_device(pdev);
1360
1361	if (err)
1362		return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n");
1363
1364	err = pci_request_regions(pdev, HINIC_DRV_NAME);
1365	if (err) {
1366		dev_err(&pdev->dev, "Failed to request PCI regions\n");
1367		goto err_pci_regions;
1368	}
1369
1370	pci_set_master(pdev);
1371
1372	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1373	if (err) {
1374		dev_err(&pdev->dev, "Failed to set DMA mask\n");
1375		goto err_dma_mask;
1376	}
1377
1378	err = nic_dev_init(pdev);
1379	if (err) {
1380		dev_err(&pdev->dev, "Failed to initialize NIC device\n");
1381		goto err_nic_dev_init;
1382	}
1383
1384	dev_info(&pdev->dev, "HiNIC driver - probed\n");
1385	return 0;
1386
1387err_nic_dev_init:
1388err_dma_mask:
1389	pci_release_regions(pdev);
1390
1391err_pci_regions:
1392	pci_disable_device(pdev);
1393	return err;
1394}
1395
1396static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
1397{
1398	struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
1399	u32 loop_cnt = 0;
1400
1401	set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
1402	usleep_range(9900, 10000);
1403
1404	while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
1405		if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
1406		    !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
1407			return;
1408
1409		usleep_range(9900, 10000);
1410		loop_cnt++;
1411	}
1412}
1413
1414static void hinic_remove(struct pci_dev *pdev)
1415{
1416	struct net_device *netdev = pci_get_drvdata(pdev);
1417	struct hinic_dev *nic_dev = netdev_priv(netdev);
1418	struct devlink *devlink = nic_dev->devlink;
1419	struct hinic_rx_mode_work *rx_mode_work;
1420
1421	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
1422		wait_sriov_cfg_complete(nic_dev);
1423		hinic_pci_sriov_disable(pdev);
1424	}
1425
1426	unregister_netdev(netdev);
1427
1428	hinic_func_table_debug_rem(nic_dev);
1429
1430	hinic_func_tbl_dbgfs_uninit(nic_dev);
1431
1432	hinic_dbg_uninit(nic_dev);
1433
1434	hinic_free_intr_coalesce(nic_dev);
1435
1436	hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
1437
1438	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1439				  HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT);
1440	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1441				  HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT);
1442	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1443				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
1444
1445	rx_mode_work = &nic_dev->rx_mode_work;
1446	cancel_work_sync(&rx_mode_work->work);
1447
1448	pci_set_drvdata(pdev, NULL);
1449
1450	destroy_workqueue(nic_dev->workq);
1451
1452	hinic_free_hwdev(nic_dev->hwdev);
1453
1454	free_netdev(netdev);
1455
1456	hinic_devlink_free(devlink);
1457
1458	pci_release_regions(pdev);
1459	pci_disable_device(pdev);
1460
1461	dev_info(&pdev->dev, "HiNIC driver - removed\n");
1462}
1463
1464static void hinic_shutdown(struct pci_dev *pdev)
1465{
1466	pci_disable_device(pdev);
1467}
1468
1469static const struct pci_device_id hinic_pci_table[] = {
1470	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
1471	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
1472	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
1473	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
1474	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
1475	{ 0, 0}
1476};
1477MODULE_DEVICE_TABLE(pci, hinic_pci_table);
1478
1479static struct pci_driver hinic_driver = {
1480	.name           = HINIC_DRV_NAME,
1481	.id_table       = hinic_pci_table,
1482	.probe          = hinic_probe,
1483	.remove         = hinic_remove,
1484	.shutdown       = hinic_shutdown,
1485	.sriov_configure = hinic_pci_sriov_configure,
1486};
1487
1488static int __init hinic_module_init(void)
1489{
1490	int ret;
1491
1492	hinic_dbg_register_debugfs(HINIC_DRV_NAME);
1493
1494	ret = pci_register_driver(&hinic_driver);
1495	if (ret)
1496		hinic_dbg_unregister_debugfs();
1497
1498	return ret;
1499}
1500
1501static void __exit hinic_module_exit(void)
1502{
1503	pci_unregister_driver(&hinic_driver);
1504	hinic_dbg_unregister_debugfs();
1505}
1506
1507module_init(hinic_module_init);
1508module_exit(hinic_module_exit);
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Huawei HiNIC PCI Express Linux driver
   4 * Copyright(c) 2017 Huawei Technologies Co., Ltd
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/moduleparam.h>
  10#include <linux/pci.h>
  11#include <linux/device.h>
  12#include <linux/errno.h>
  13#include <linux/types.h>
  14#include <linux/etherdevice.h>
  15#include <linux/netdevice.h>
  16#include <linux/slab.h>
  17#include <linux/if_vlan.h>
  18#include <linux/semaphore.h>
  19#include <linux/workqueue.h>
  20#include <net/ip.h>
 
  21#include <linux/bitops.h>
  22#include <linux/bitmap.h>
  23#include <linux/delay.h>
  24#include <linux/err.h>
  25
 
  26#include "hinic_hw_qp.h"
  27#include "hinic_hw_dev.h"
 
  28#include "hinic_port.h"
  29#include "hinic_tx.h"
  30#include "hinic_rx.h"
  31#include "hinic_dev.h"
 
  32
  33MODULE_AUTHOR("Huawei Technologies CO., Ltd");
  34MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
  35MODULE_LICENSE("GPL");
  36
  37static unsigned int tx_weight = 64;
  38module_param(tx_weight, uint, 0644);
  39MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)");
  40
  41static unsigned int rx_weight = 64;
  42module_param(rx_weight, uint, 0644);
  43MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
  44
  45#define HINIC_DEV_ID_QUAD_PORT_25GE         0x1822
  46#define HINIC_DEV_ID_DUAL_PORT_100GE        0x0200
  47#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ   0x0205
  48#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ    0x0210
 
  49
  50#define HINIC_WQ_NAME                   "hinic_dev"
  51
  52#define MSG_ENABLE_DEFAULT              (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
  53					 NETIF_MSG_IFUP |                  \
  54					 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
  55
  56#define HINIC_LRO_MAX_WQE_NUM_DEFAULT	8
  57
  58#define HINIC_LRO_RX_TIMER_DEFAULT	16
  59
  60#define VLAN_BITMAP_SIZE(nic_dev)       (ALIGN(VLAN_N_VID, 8) / 8)
  61
  62#define work_to_rx_mode_work(work)      \
  63		container_of(work, struct hinic_rx_mode_work, work)
  64
  65#define rx_mode_work_to_nic_dev(rx_mode_work) \
  66		container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
  67
 
 
 
 
 
 
  68static int change_mac_addr(struct net_device *netdev, const u8 *addr);
  69
  70static int set_features(struct hinic_dev *nic_dev,
  71			netdev_features_t pre_features,
  72			netdev_features_t features, bool force_change);
  73
  74static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
  75{
  76	struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
  77	struct hinic_rxq_stats rx_stats;
  78
  79	u64_stats_init(&rx_stats.syncp);
  80
  81	hinic_rxq_get_stats(rxq, &rx_stats);
  82
  83	u64_stats_update_begin(&nic_rx_stats->syncp);
  84	nic_rx_stats->bytes += rx_stats.bytes;
  85	nic_rx_stats->pkts  += rx_stats.pkts;
  86	nic_rx_stats->errors += rx_stats.errors;
  87	nic_rx_stats->csum_errors += rx_stats.csum_errors;
  88	nic_rx_stats->other_errors += rx_stats.other_errors;
  89	u64_stats_update_end(&nic_rx_stats->syncp);
  90
  91	hinic_rxq_clean_stats(rxq);
  92}
  93
  94static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
  95{
  96	struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
  97	struct hinic_txq_stats tx_stats;
  98
  99	u64_stats_init(&tx_stats.syncp);
 100
 101	hinic_txq_get_stats(txq, &tx_stats);
 102
 103	u64_stats_update_begin(&nic_tx_stats->syncp);
 104	nic_tx_stats->bytes += tx_stats.bytes;
 105	nic_tx_stats->pkts += tx_stats.pkts;
 106	nic_tx_stats->tx_busy += tx_stats.tx_busy;
 107	nic_tx_stats->tx_wake += tx_stats.tx_wake;
 108	nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
 109	nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
 110	u64_stats_update_end(&nic_tx_stats->syncp);
 111
 112	hinic_txq_clean_stats(txq);
 113}
 114
 115static void update_nic_stats(struct hinic_dev *nic_dev)
 
 
 116{
 117	int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
 118
 119	for (i = 0; i < num_qps; i++)
 120		update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
 121
 122	for (i = 0; i < num_qps; i++)
 123		update_tx_stats(nic_dev, &nic_dev->txqs[i]);
 124}
 125
 126/**
 127 * create_txqs - Create the Logical Tx Queues of specific NIC device
 128 * @nic_dev: the specific NIC device
 129 *
 130 * Return 0 - Success, negative - Failure
 131 **/
 132static int create_txqs(struct hinic_dev *nic_dev)
 133{
 134	int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 135	struct net_device *netdev = nic_dev->netdev;
 136	size_t txq_size;
 137
 138	if (nic_dev->txqs)
 139		return -EINVAL;
 140
 141	txq_size = num_txqs * sizeof(*nic_dev->txqs);
 142	nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
 143	if (!nic_dev->txqs)
 144		return -ENOMEM;
 145
 
 
 146	for (i = 0; i < num_txqs; i++) {
 147		struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
 148
 149		err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
 150		if (err) {
 151			netif_err(nic_dev, drv, netdev,
 152				  "Failed to init Txq\n");
 153			goto err_init_txq;
 154		}
 
 
 
 
 
 
 
 155	}
 156
 157	return 0;
 158
 
 
 159err_init_txq:
 160	for (j = 0; j < i; j++)
 
 161		hinic_clean_txq(&nic_dev->txqs[j]);
 
 
 
 162
 163	devm_kfree(&netdev->dev, nic_dev->txqs);
 
 164	return err;
 165}
 166
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 167/**
 168 * free_txqs - Free the Logical Tx Queues of specific NIC device
 169 * @nic_dev: the specific NIC device
 170 **/
 171static void free_txqs(struct hinic_dev *nic_dev)
 172{
 173	int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 174	struct net_device *netdev = nic_dev->netdev;
 175
 176	if (!nic_dev->txqs)
 177		return;
 178
 179	for (i = 0; i < num_txqs; i++)
 
 180		hinic_clean_txq(&nic_dev->txqs[i]);
 
 
 
 181
 182	devm_kfree(&netdev->dev, nic_dev->txqs);
 183	nic_dev->txqs = NULL;
 184}
 185
 186/**
 187 * create_txqs - Create the Logical Rx Queues of specific NIC device
 188 * @nic_dev: the specific NIC device
 189 *
 190 * Return 0 - Success, negative - Failure
 191 **/
 192static int create_rxqs(struct hinic_dev *nic_dev)
 193{
 194	int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 195	struct net_device *netdev = nic_dev->netdev;
 196	size_t rxq_size;
 197
 198	if (nic_dev->rxqs)
 199		return -EINVAL;
 200
 201	rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
 202	nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
 203	if (!nic_dev->rxqs)
 204		return -ENOMEM;
 205
 
 
 206	for (i = 0; i < num_rxqs; i++) {
 207		struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
 208
 209		err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
 210		if (err) {
 211			netif_err(nic_dev, drv, netdev,
 212				  "Failed to init rxq\n");
 213			goto err_init_rxq;
 214		}
 
 
 
 
 
 
 
 215	}
 216
 217	return 0;
 218
 
 
 219err_init_rxq:
 220	for (j = 0; j < i; j++)
 
 221		hinic_clean_rxq(&nic_dev->rxqs[j]);
 
 
 
 222
 223	devm_kfree(&netdev->dev, nic_dev->rxqs);
 
 224	return err;
 225}
 226
 227/**
 228 * free_txqs - Free the Logical Rx Queues of specific NIC device
 229 * @nic_dev: the specific NIC device
 230 **/
 231static void free_rxqs(struct hinic_dev *nic_dev)
 232{
 233	int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
 234	struct net_device *netdev = nic_dev->netdev;
 235
 236	if (!nic_dev->rxqs)
 237		return;
 238
 239	for (i = 0; i < num_rxqs; i++)
 
 240		hinic_clean_rxq(&nic_dev->rxqs[i]);
 
 
 
 241
 242	devm_kfree(&netdev->dev, nic_dev->rxqs);
 243	nic_dev->rxqs = NULL;
 244}
 245
 246static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
 247{
 248	int err;
 249
 250	err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
 251	if (err)
 252		return err;
 253
 254	return 0;
 255}
 256
 257static int hinic_rss_init(struct hinic_dev *nic_dev)
 258{
 259	u8 default_rss_key[HINIC_RSS_KEY_SIZE];
 260	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
 261	u32 *indir_tbl;
 262	int err, i;
 263
 264	indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
 265	if (!indir_tbl)
 266		return -ENOMEM;
 267
 268	netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
 269	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
 270		indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);
 271
 272	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
 273	if (err)
 274		goto out;
 275
 276	err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
 277	if (err)
 278		goto out;
 279
 280	err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
 281	if (err)
 282		goto out;
 283
 284	err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
 285					nic_dev->rss_hash_engine);
 286	if (err)
 287		goto out;
 288
 289	err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
 290	if (err)
 291		goto out;
 292
 293out:
 294	kfree(indir_tbl);
 295	return err;
 296}
 297
 298static void hinic_rss_deinit(struct hinic_dev *nic_dev)
 299{
 300	hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
 301}
 302
 303static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
 304{
 305	nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
 306	nic_dev->rss_type.tcp_ipv6_ext = 1;
 307	nic_dev->rss_type.ipv6_ext = 1;
 308	nic_dev->rss_type.tcp_ipv6 = 1;
 309	nic_dev->rss_type.ipv6 = 1;
 310	nic_dev->rss_type.tcp_ipv4 = 1;
 311	nic_dev->rss_type.ipv4 = 1;
 312	nic_dev->rss_type.udp_ipv6 = 1;
 313	nic_dev->rss_type.udp_ipv4 = 1;
 314}
 315
 316static void hinic_enable_rss(struct hinic_dev *nic_dev)
 317{
 318	struct net_device *netdev = nic_dev->netdev;
 319	struct hinic_hwdev *hwdev = nic_dev->hwdev;
 320	struct hinic_hwif *hwif = hwdev->hwif;
 321	struct pci_dev *pdev = hwif->pdev;
 322	int i, node, err = 0;
 323	u16 num_cpus = 0;
 324
 325	nic_dev->max_qps = hinic_hwdev_max_num_qps(hwdev);
 326	if (nic_dev->max_qps <= 1) {
 327		nic_dev->flags &= ~HINIC_RSS_ENABLE;
 328		nic_dev->rss_limit = nic_dev->max_qps;
 329		nic_dev->num_qps = nic_dev->max_qps;
 330		nic_dev->num_rss = nic_dev->max_qps;
 331
 332		return;
 333	}
 334
 335	err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
 336	if (err) {
 337		netif_err(nic_dev, drv, netdev,
 338			  "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
 339		nic_dev->flags &= ~HINIC_RSS_ENABLE;
 340		nic_dev->max_qps = 1;
 341		nic_dev->rss_limit = nic_dev->max_qps;
 342		nic_dev->num_qps = nic_dev->max_qps;
 343		nic_dev->num_rss = nic_dev->max_qps;
 344
 345		return;
 346	}
 347
 348	nic_dev->flags |= HINIC_RSS_ENABLE;
 349
 350	for (i = 0; i < num_online_cpus(); i++) {
 351		node = cpu_to_node(i);
 352		if (node == dev_to_node(&pdev->dev))
 353			num_cpus++;
 354	}
 355
 356	if (!num_cpus)
 357		num_cpus = num_online_cpus();
 358
 359	nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus);
 
 360
 361	nic_dev->rss_limit = nic_dev->num_qps;
 362	nic_dev->num_rss = nic_dev->num_qps;
 363
 364	hinic_init_rss_parameters(nic_dev);
 365	err = hinic_rss_init(nic_dev);
 366	if (err)
 367		netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
 368}
 369
 370static int hinic_open(struct net_device *netdev)
 371{
 372	struct hinic_dev *nic_dev = netdev_priv(netdev);
 373	enum hinic_port_link_state link_state;
 374	int err, ret;
 375
 376	if (!(nic_dev->flags & HINIC_INTF_UP)) {
 377		err = hinic_hwdev_ifup(nic_dev->hwdev);
 
 378		if (err) {
 379			netif_err(nic_dev, drv, netdev,
 380				  "Failed - HW interface up\n");
 381			return err;
 382		}
 383	}
 384
 385	err = create_txqs(nic_dev);
 386	if (err) {
 387		netif_err(nic_dev, drv, netdev,
 388			  "Failed to create Tx queues\n");
 389		goto err_create_txqs;
 390	}
 391
 
 
 392	err = create_rxqs(nic_dev);
 393	if (err) {
 394		netif_err(nic_dev, drv, netdev,
 395			  "Failed to create Rx queues\n");
 396		goto err_create_rxqs;
 397	}
 398
 399	hinic_enable_rss(nic_dev);
 400
 401	err = hinic_configure_max_qnum(nic_dev);
 402	if (err) {
 403		netif_err(nic_dev, drv, nic_dev->netdev,
 404			  "Failed to configure the maximum number of queues\n");
 405		goto err_port_state;
 406	}
 407
 408	netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
 409	netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);
 410
 411	err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
 412	if (err) {
 413		netif_err(nic_dev, drv, netdev,
 414			  "Failed to set port state\n");
 415		goto err_port_state;
 416	}
 417
 418	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
 419	if (err) {
 420		netif_err(nic_dev, drv, netdev,
 421			  "Failed to set func port state\n");
 422		goto err_func_port_state;
 423	}
 424
  425	/* Wait up to 3 sec for the link state to settle after enabling the port */
 426	msleep(3000);
 427
 428	down(&nic_dev->mgmt_lock);
 429
 430	err = hinic_port_link_state(nic_dev, &link_state);
 431	if (err) {
 432		netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
 433		goto err_port_link;
 434	}
 435
 436	if (link_state == HINIC_LINK_STATE_UP)
 437		nic_dev->flags |= HINIC_LINK_UP;
 438
 439	nic_dev->flags |= HINIC_INTF_UP;
 440
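	/*
	 * Enable the carrier only once both the physical link and the
	 * interface are up; otherwise the link event handler turns it on
	 * when the link comes up later.
	 */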
 441	if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
 442	    (HINIC_LINK_UP | HINIC_INTF_UP)) {
 443		netif_info(nic_dev, drv, netdev, "link + intf UP\n");
 444		netif_carrier_on(netdev);
 445		netif_tx_wake_all_queues(netdev);
 446	}
 447
 448	up(&nic_dev->mgmt_lock);
 449
 450	netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
 451	return 0;
 452
 453err_port_link:
 454	up(&nic_dev->mgmt_lock);
 455	ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
 456	if (ret)
 457		netif_warn(nic_dev, drv, netdev,
 458			   "Failed to revert func port state\n");
 459
 460err_func_port_state:
 461	ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
 462	if (ret)
 463		netif_warn(nic_dev, drv, netdev,
 464			   "Failed to revert port state\n");
 465err_port_state:
 466	free_rxqs(nic_dev);
 467	if (nic_dev->flags & HINIC_RSS_ENABLE) {
 468		hinic_rss_deinit(nic_dev);
 469		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
 470	}
 471
 472err_create_rxqs:
 473	free_txqs(nic_dev);
 474
 475err_create_txqs:
 476	if (!(nic_dev->flags & HINIC_INTF_UP))
 477		hinic_hwdev_ifdown(nic_dev->hwdev);
 478	return err;
 479}
 480
 481static int hinic_close(struct net_device *netdev)
 482{
 483	struct hinic_dev *nic_dev = netdev_priv(netdev);
 484	unsigned int flags;
 485	int err;
 486
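	/*
	 * Clear HINIC_INTF_UP and stop the Tx path under mgmt_lock so the
	 * link event handler cannot re-enable the carrier while the
	 * interface is going down.
	 */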
 487	down(&nic_dev->mgmt_lock);
 488
 489	flags = nic_dev->flags;
 490	nic_dev->flags &= ~HINIC_INTF_UP;
 491
 492	netif_carrier_off(netdev);
 493	netif_tx_disable(netdev);
 494
 495	update_nic_stats(nic_dev);
 496
 497	up(&nic_dev->mgmt_lock);
 498
 499	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
 500	if (err) {
 501		netif_err(nic_dev, drv, netdev,
 502			  "Failed to set func port state\n");
 503		nic_dev->flags |= (flags & HINIC_INTF_UP);
 504		return err;
 505	}
 506
 507	err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
 508	if (err) {
 509		netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
 510		nic_dev->flags |= (flags & HINIC_INTF_UP);
 511		return err;
 512	}
 513
 514	if (nic_dev->flags & HINIC_RSS_ENABLE) {
 515		hinic_rss_deinit(nic_dev);
 516		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
 517	}
 518
 519	free_rxqs(nic_dev);
 520	free_txqs(nic_dev);
 521
 522	if (flags & HINIC_INTF_UP)
 523		hinic_hwdev_ifdown(nic_dev->hwdev);
 524
 525	netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
 526	return 0;
 527}
 528
 529static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
 530{
 531	struct hinic_dev *nic_dev = netdev_priv(netdev);
 532	int err;
 533
 534	netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);
 535
 536	err = hinic_port_set_mtu(nic_dev, new_mtu);
 537	if (err)
 538		netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
 539	else
 540		netdev->mtu = new_mtu;
 541
 542	return err;
 543}
 544
 545/**
 546 * change_mac_addr - change the main mac address of network device
 547 * @netdev: network device
 548 * @addr: mac address to set
 549 *
 550 * Return 0 - Success, negative - Failure
 551 **/
 552static int change_mac_addr(struct net_device *netdev, const u8 *addr)
 553{
 554	struct hinic_dev *nic_dev = netdev_priv(netdev);
 555	u16 vid = 0;
 556	int err;
 557
 558	if (!is_valid_ether_addr(addr))
 559		return -EADDRNOTAVAIL;
 560
 561	netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n",
 562		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
 563
 564	down(&nic_dev->mgmt_lock);
 565
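	/*
	 * Replace the MAC filter for untagged traffic (vid 0) and for every
	 * VLAN id currently set in vlan_bitmap.
	 */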
 566	do {
 567		err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
 568		if (err) {
 569			netif_err(nic_dev, drv, netdev,
 570				  "Failed to delete mac\n");
 571			break;
 572		}
 573
 574		err = hinic_port_add_mac(nic_dev, addr, vid);
 575		if (err) {
 576			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
 577			break;
 578		}
 579
 580		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
 581	} while (vid != VLAN_N_VID);
 582
 583	up(&nic_dev->mgmt_lock);
 584	return err;
 585}
 586
 587static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
 588{
 589	unsigned char new_mac[ETH_ALEN];
 590	struct sockaddr *saddr = addr;
 591	int err;
 592
 593	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
 594
 595	err = change_mac_addr(netdev, new_mac);
 596	if (!err)
 597		memcpy(netdev->dev_addr, new_mac, ETH_ALEN);
 598
 599	return err;
 600}
 601
 602/**
 603 * add_mac_addr - add mac address to network device
 604 * @netdev: network device
 605 * @addr: mac address to add
 606 *
 607 * Return 0 - Success, negative - Failure
 608 **/
 609static int add_mac_addr(struct net_device *netdev, const u8 *addr)
 610{
 611	struct hinic_dev *nic_dev = netdev_priv(netdev);
 612	u16 vid = 0;
 613	int err;
 614
 615	netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
 616		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
 617
 618	down(&nic_dev->mgmt_lock);
 619
 620	do {
 621		err = hinic_port_add_mac(nic_dev, addr, vid);
 622		if (err) {
 623			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
 624			break;
 625		}
 626
 627		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
 628	} while (vid != VLAN_N_VID);
 629
 630	up(&nic_dev->mgmt_lock);
 631	return err;
 632}
 633
 634/**
 635 * remove_mac_addr - remove mac address from network device
 636 * @netdev: network device
 637 * @addr: mac address to remove
 638 *
 639 * Return 0 - Success, negative - Failure
 640 **/
 641static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
 642{
 643	struct hinic_dev *nic_dev = netdev_priv(netdev);
 644	u16 vid = 0;
 645	int err;
 646
 647	if (!is_valid_ether_addr(addr))
 648		return -EADDRNOTAVAIL;
 649
 650	netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n",
 651		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
 652
 653	down(&nic_dev->mgmt_lock);
 654
 655	do {
 656		err = hinic_port_del_mac(nic_dev, addr, vid);
 657		if (err) {
 658			netif_err(nic_dev, drv, netdev,
 659				  "Failed to delete mac\n");
 660			break;
 661		}
 662
 663		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
 664	} while (vid != VLAN_N_VID);
 665
 666	up(&nic_dev->mgmt_lock);
 667	return err;
 668}
 669
 670static int hinic_vlan_rx_add_vid(struct net_device *netdev,
 671				 __always_unused __be16 proto, u16 vid)
 672{
 673	struct hinic_dev *nic_dev = netdev_priv(netdev);
 674	int ret, err;
 675
 676	netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);
 677
 678	down(&nic_dev->mgmt_lock);
 679
 680	err = hinic_port_add_vlan(nic_dev, vid);
 681	if (err) {
 682		netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
 683		goto err_vlan_add;
 684	}
 685
 686	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
 687	if (err) {
 688		netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
 689		goto err_add_mac;
 690	}
 691
 692	bitmap_set(nic_dev->vlan_bitmap, vid, 1);
 693
 694	up(&nic_dev->mgmt_lock);
 695	return 0;
 696
 697err_add_mac:
 698	ret = hinic_port_del_vlan(nic_dev, vid);
 699	if (ret)
 700		netif_err(nic_dev, drv, netdev,
 701			  "Failed to revert by removing vlan\n");
 702
 703err_vlan_add:
 704	up(&nic_dev->mgmt_lock);
 705	return err;
 706}
 707
 708static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
 709				  __always_unused __be16 proto, u16 vid)
 710{
 711	struct hinic_dev *nic_dev = netdev_priv(netdev);
 712	int err;
 713
 714	netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);
 715
 716	down(&nic_dev->mgmt_lock);
 717
 718	err = hinic_port_del_vlan(nic_dev, vid);
 719	if (err) {
 720		netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
 721		goto err_del_vlan;
 722	}
 723
 724	bitmap_clear(nic_dev->vlan_bitmap, vid, 1);
 725
 726	up(&nic_dev->mgmt_lock);
 727	return 0;
 728
 729err_del_vlan:
 730	up(&nic_dev->mgmt_lock);
 731	return err;
 732}
 733
 734static void set_rx_mode(struct work_struct *work)
 735{
 736	struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
 737	struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
 738
 739	netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
 740
 741	hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
 742
 743	__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
 744	__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
 745}
 746
 747static void hinic_set_rx_mode(struct net_device *netdev)
 748{
 749	struct hinic_dev *nic_dev = netdev_priv(netdev);
 750	struct hinic_rx_mode_work *rx_mode_work;
 751	u32 rx_mode;
 752
 753	rx_mode_work = &nic_dev->rx_mode_work;
 754
 755	rx_mode = HINIC_RX_MODE_UC |
 756		  HINIC_RX_MODE_MC |
 757		  HINIC_RX_MODE_BC;
 758
 759	if (netdev->flags & IFF_PROMISC)
 760		rx_mode |= HINIC_RX_MODE_PROMISC;
 761	else if (netdev->flags & IFF_ALLMULTI)
 762		rx_mode |= HINIC_RX_MODE_MC_ALL;
 763
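	/*
	 * ndo_set_rx_mode runs in atomic context, so the management commands
	 * and the MAC filter sync (which may sleep) are deferred to the
	 * driver workqueue.
	 */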
 764	rx_mode_work->rx_mode = rx_mode;
 765
 766	queue_work(nic_dev->workq, &rx_mode_work->work);
 767}
 768
 769static void hinic_tx_timeout(struct net_device *netdev)
 770{
 771	struct hinic_dev *nic_dev = netdev_priv(netdev);
 772
 773	netif_err(nic_dev, drv, netdev, "Tx timeout\n");
 774}
 775
 776static void hinic_get_stats64(struct net_device *netdev,
 777			      struct rtnl_link_stats64 *stats)
 778{
 779	struct hinic_dev *nic_dev = netdev_priv(netdev);
 780	struct hinic_rxq_stats *nic_rx_stats;
 781	struct hinic_txq_stats *nic_tx_stats;
 782
 783	nic_rx_stats = &nic_dev->rx_stats;
 784	nic_tx_stats = &nic_dev->tx_stats;
 785
 786	down(&nic_dev->mgmt_lock);
 787
 788	if (nic_dev->flags & HINIC_INTF_UP)
 789		update_nic_stats(nic_dev);
 790
 791	up(&nic_dev->mgmt_lock);
 792
 793	stats->rx_bytes   = nic_rx_stats->bytes;
 794	stats->rx_packets = nic_rx_stats->pkts;
 795	stats->rx_errors  = nic_rx_stats->errors;
 796
 797	stats->tx_bytes   = nic_tx_stats->bytes;
 798	stats->tx_packets = nic_tx_stats->pkts;
 799	stats->tx_errors  = nic_tx_stats->tx_dropped;
 800}
 801
 802static int hinic_set_features(struct net_device *netdev,
 803			      netdev_features_t features)
 804{
 805	struct hinic_dev *nic_dev = netdev_priv(netdev);
 806
 807	return set_features(nic_dev, nic_dev->netdev->features,
 808			    features, false);
 809}
 810
 811static netdev_features_t hinic_fix_features(struct net_device *netdev,
 812					    netdev_features_t features)
 813{
 814	struct hinic_dev *nic_dev = netdev_priv(netdev);
 815
 816	/* If Rx checksum is disabled, then LRO should also be disabled */
 817	if (!(features & NETIF_F_RXCSUM)) {
 818		netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
 819		features &= ~NETIF_F_LRO;
 820	}
 821
 822	return features;
 823}
 824
 825static const struct net_device_ops hinic_netdev_ops = {
 826	.ndo_open = hinic_open,
 827	.ndo_stop = hinic_close,
 828	.ndo_change_mtu = hinic_change_mtu,
 829	.ndo_set_mac_address = hinic_set_mac_addr,
 830	.ndo_validate_addr = eth_validate_addr,
 831	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
 832	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
 833	.ndo_set_rx_mode = hinic_set_rx_mode,
 834	.ndo_start_xmit = hinic_xmit_frame,
 835	.ndo_tx_timeout = hinic_tx_timeout,
 836	.ndo_get_stats64 = hinic_get_stats64,
 837	.ndo_fix_features = hinic_fix_features,
 838	.ndo_set_features = hinic_set_features,
 839};
 840
 841static void netdev_features_init(struct net_device *netdev)
 842{
 843	netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
 844			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
 845			      NETIF_F_RXCSUM | NETIF_F_LRO |
 846			      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 847
 848	netdev->vlan_features = netdev->hw_features;
 849
 850	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
 851}
 852
 853/**
 854 * link_status_event_handler - link event handler
 855 * @handle: nic device for the handler
 856 * @buf_in: input buffer
 857 * @in_size: input size
  858 * @buf_out: output buffer
 859 * @out_size: returned output size
  860 *
  861 * Updates the netdev carrier state to match the reported link state.
 862 **/
 863static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
 864				      void *buf_out, u16 *out_size)
 865{
 866	struct hinic_port_link_status *link_status, *ret_link_status;
 867	struct hinic_dev *nic_dev = handle;
 868
 869	link_status = buf_in;
 870
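	/*
	 * Carrier updates are serialized with ndo_open/ndo_stop through
	 * mgmt_lock so they stay consistent with the HINIC_INTF_UP flag.
	 */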
 871	if (link_status->link == HINIC_LINK_STATE_UP) {
 872		down(&nic_dev->mgmt_lock);
 873
 874		nic_dev->flags |= HINIC_LINK_UP;
 875
 876		if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
 877		    (HINIC_LINK_UP | HINIC_INTF_UP)) {
 878			netif_carrier_on(nic_dev->netdev);
 879			netif_tx_wake_all_queues(nic_dev->netdev);
 880		}
 881
 882		up(&nic_dev->mgmt_lock);
 883
 884		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
 885	} else {
 886		down(&nic_dev->mgmt_lock);
 887
 888		nic_dev->flags &= ~HINIC_LINK_UP;
 889
 890		netif_carrier_off(nic_dev->netdev);
 891		netif_tx_disable(nic_dev->netdev);
 892
 893		up(&nic_dev->mgmt_lock);
 894
 895		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
 896	}
 897
 898	ret_link_status = buf_out;
 899	ret_link_status->status = 0;
 900
 901	*out_size = sizeof(*ret_link_status);
 902}
 903
 904static int set_features(struct hinic_dev *nic_dev,
 905			netdev_features_t pre_features,
 906			netdev_features_t features, bool force_change)
 907{
 908	netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
 909	u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 910	int err = 0;
 911
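	/*
	 * Only reprogram feature bits that actually changed; when
	 * force_change is set (initial configuration) every bit is treated
	 * as changed.
	 */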
 912	if (changed & NETIF_F_TSO)
 913		err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
 914					 HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
 915
 916	if (changed & NETIF_F_RXCSUM)
 917		err = hinic_set_rx_csum_offload(nic_dev, csum_en);
 918
 919	if (changed & NETIF_F_LRO) {
 920		err = hinic_set_rx_lro_state(nic_dev,
 921					     !!(features & NETIF_F_LRO),
 922					     HINIC_LRO_RX_TIMER_DEFAULT,
 923					     HINIC_LRO_MAX_WQE_NUM_DEFAULT);
 924	}
 925
 926	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 927		err = hinic_set_rx_vlan_offload(nic_dev,
 928						!!(features &
 929						   NETIF_F_HW_VLAN_CTAG_RX));
 930
 931	return err;
 932}
 933
 934/**
 935 * nic_dev_init - Initialize the NIC device
 936 * @pdev: the NIC pci device
 937 *
 938 * Return 0 - Success, negative - Failure
 939 **/
 940static int nic_dev_init(struct pci_dev *pdev)
 941{
 942	struct hinic_rx_mode_work *rx_mode_work;
 943	struct hinic_txq_stats *tx_stats;
 944	struct hinic_rxq_stats *rx_stats;
 945	struct hinic_dev *nic_dev;
 946	struct net_device *netdev;
 947	struct hinic_hwdev *hwdev;
 948	int err, num_qps;
 949
 950	hwdev = hinic_init_hwdev(pdev);
 951	if (IS_ERR(hwdev)) {
 952		dev_err(&pdev->dev, "Failed to initialize HW device\n");
 953		return PTR_ERR(hwdev);
 954	}
 955
 956	num_qps = hinic_hwdev_num_qps(hwdev);
 957	if (num_qps <= 0) {
 958		dev_err(&pdev->dev, "Invalid number of QPS\n");
 959		err = -EINVAL;
 960		goto err_num_qps;
 961	}
 962
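	/* Allocate a multiqueue net_device with one Tx and one Rx queue per hardware QP */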
 963	netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
 964	if (!netdev) {
 965		dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
 966		err = -ENOMEM;
 967		goto err_alloc_etherdev;
 968	}
 969
 970	hinic_set_ethtool_ops(netdev);
 971	netdev->netdev_ops = &hinic_netdev_ops;
 972	netdev->max_mtu = ETH_MAX_MTU;
 973
 974	nic_dev = netdev_priv(netdev);
 975	nic_dev->netdev = netdev;
 976	nic_dev->hwdev  = hwdev;
 977	nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
 978	nic_dev->flags = 0;
 979	nic_dev->txqs = NULL;
 980	nic_dev->rxqs = NULL;
 981	nic_dev->tx_weight = tx_weight;
 982	nic_dev->rx_weight = rx_weight;
 983
 984	sema_init(&nic_dev->mgmt_lock, 1);
 985
 986	tx_stats = &nic_dev->tx_stats;
 987	rx_stats = &nic_dev->rx_stats;
 988
 989	u64_stats_init(&tx_stats->syncp);
 990	u64_stats_init(&rx_stats->syncp);
 991
 992	nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
 993					    VLAN_BITMAP_SIZE(nic_dev),
 994					    GFP_KERNEL);
 995	if (!nic_dev->vlan_bitmap) {
 996		err = -ENOMEM;
 997		goto err_vlan_bitmap;
 998	}
 999
1000	nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
1001	if (!nic_dev->workq) {
1002		err = -ENOMEM;
1003		goto err_workq;
1004	}
1005
1006	pci_set_drvdata(pdev, netdev);
1007
1008	err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
1009	if (err)
1010		dev_warn(&pdev->dev, "Failed to get mac address\n");
1011
1012	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
1013	if (err) {
1014		dev_err(&pdev->dev, "Failed to add mac\n");
1015		goto err_add_mac;
1016	}
1017
1018	err = hinic_port_set_mtu(nic_dev, netdev->mtu);
1019	if (err) {
1020		dev_err(&pdev->dev, "Failed to set mtu\n");
1021		goto err_set_mtu;
1022	}
1023
1024	rx_mode_work = &nic_dev->rx_mode_work;
1025	INIT_WORK(&rx_mode_work->work, set_rx_mode);
1026
1027	netdev_features_init(netdev);
1028
1029	netif_carrier_off(netdev);
1030
1031	hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
1032				nic_dev, link_status_event_handler);
1033
1034	err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
1035	if (err)
1036		goto err_set_features;
1037
1038	SET_NETDEV_DEV(netdev, &pdev->dev);
1039
1040	err = register_netdev(netdev);
1041	if (err) {
1042		dev_err(&pdev->dev, "Failed to register netdev\n");
1043		goto err_reg_netdev;
1044	}
1045
1046	return 0;
1047
1048err_reg_netdev:
1049err_set_features:
1050	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1051				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
1052	cancel_work_sync(&rx_mode_work->work);
1053
1054err_set_mtu:
1055err_add_mac:
1056	pci_set_drvdata(pdev, NULL);
1057	destroy_workqueue(nic_dev->workq);
1058
1059err_workq:
1060err_vlan_bitmap:
1061	free_netdev(netdev);
1062
1063err_alloc_etherdev:
1064err_num_qps:
1065	hinic_free_hwdev(hwdev);
1066	return err;
1067}
1068
1069static int hinic_probe(struct pci_dev *pdev,
1070		       const struct pci_device_id *id)
1071{
1072	int err = pci_enable_device(pdev);
1073
1074	if (err) {
1075		dev_err(&pdev->dev, "Failed to enable PCI device\n");
1076		return err;
1077	}
1078
1079	err = pci_request_regions(pdev, HINIC_DRV_NAME);
1080	if (err) {
1081		dev_err(&pdev->dev, "Failed to request PCI regions\n");
1082		goto err_pci_regions;
1083	}
1084
1085	pci_set_master(pdev);
1086
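	/*
	 * Prefer 64-bit DMA for both streaming and coherent mappings and
	 * fall back to 32-bit masks if the platform cannot support it.
	 */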
1087	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1088	if (err) {
1089		dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
1090		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1091		if (err) {
1092			dev_err(&pdev->dev, "Failed to set DMA mask\n");
1093			goto err_dma_mask;
1094		}
1095	}
1096
1097	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1098	if (err) {
1099		dev_warn(&pdev->dev,
1100			 "Couldn't set 64-bit consistent DMA mask\n");
1101		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1102		if (err) {
1103			dev_err(&pdev->dev,
1104				"Failed to set consistent DMA mask\n");
1105			goto err_dma_consistent_mask;
1106		}
1107	}
1108
1109	err = nic_dev_init(pdev);
1110	if (err) {
1111		dev_err(&pdev->dev, "Failed to initialize NIC device\n");
1112		goto err_nic_dev_init;
1113	}
1114
1115	dev_info(&pdev->dev, "HiNIC driver - probed\n");
1116	return 0;
1117
1118err_nic_dev_init:
1119err_dma_consistent_mask:
1120err_dma_mask:
1121	pci_release_regions(pdev);
1122
1123err_pci_regions:
1124	pci_disable_device(pdev);
1125	return err;
1126}
1127
1128static void hinic_remove(struct pci_dev *pdev)
1129{
1130	struct net_device *netdev = pci_get_drvdata(pdev);
1131	struct hinic_dev *nic_dev = netdev_priv(netdev);
1132	struct hinic_rx_mode_work *rx_mode_work;
1133
1134	unregister_netdev(netdev);
1135
1136	hinic_hwdev_cb_unregister(nic_dev->hwdev,
1137				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
1138
1139	rx_mode_work = &nic_dev->rx_mode_work;
1140	cancel_work_sync(&rx_mode_work->work);
1141
1142	pci_set_drvdata(pdev, NULL);
1143
1144	destroy_workqueue(nic_dev->workq);
1145
1146	hinic_free_hwdev(nic_dev->hwdev);
1147
1148	free_netdev(netdev);
1149
1150	pci_release_regions(pdev);
1151	pci_disable_device(pdev);
1152
1153	dev_info(&pdev->dev, "HiNIC driver - removed\n");
1154}
1155
1156static void hinic_shutdown(struct pci_dev *pdev)
1157{
1158	pci_disable_device(pdev);
1159}
1160
1161static const struct pci_device_id hinic_pci_table[] = {
1162	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
1163	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
1164	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
1165	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
1166	{ 0, 0}
1167};
1168MODULE_DEVICE_TABLE(pci, hinic_pci_table);
1169
1170static struct pci_driver hinic_driver = {
1171	.name           = HINIC_DRV_NAME,
1172	.id_table       = hinic_pci_table,
1173	.probe          = hinic_probe,
1174	.remove         = hinic_remove,
1175	.shutdown       = hinic_shutdown,
1176};
1177
1178module_pci_driver(hinic_driver);