v6.13.7
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qede NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/crash_dump.h>
   8#include <linux/module.h>
   9#include <linux/pci.h>
  10#include <linux/device.h>
  11#include <linux/netdevice.h>
  12#include <linux/etherdevice.h>
  13#include <linux/skbuff.h>
  14#include <linux/errno.h>
  15#include <linux/list.h>
  16#include <linux/string.h>
  17#include <linux/dma-mapping.h>
  18#include <linux/interrupt.h>
  19#include <asm/byteorder.h>
  20#include <asm/param.h>
  21#include <linux/io.h>
  22#include <linux/netdev_features.h>
  23#include <linux/udp.h>
  24#include <linux/tcp.h>
  25#include <net/udp_tunnel.h>
  26#include <linux/ip.h>
  27#include <net/ipv6.h>
  28#include <net/tcp.h>
  29#include <linux/if_ether.h>
  30#include <linux/if_vlan.h>
  31#include <linux/pkt_sched.h>
  32#include <linux/ethtool.h>
  33#include <linux/in.h>
  34#include <linux/random.h>
  35#include <net/ip6_checksum.h>
  36#include <linux/bitops.h>
  37#include <linux/vmalloc.h>
  38#include "qede.h"
  39#include "qede_ptp.h"
  40
  41MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
  42MODULE_LICENSE("GPL");
  43
  44static uint debug;
  45module_param(debug, uint, 0);
  46MODULE_PARM_DESC(debug, " Default debug msglevel");
  47
  48static const struct qed_eth_ops *qed_ops;
  49
  50#define CHIP_NUM_57980S_40		0x1634
  51#define CHIP_NUM_57980S_10		0x1666
  52#define CHIP_NUM_57980S_MF		0x1636
  53#define CHIP_NUM_57980S_100		0x1644
  54#define CHIP_NUM_57980S_50		0x1654
  55#define CHIP_NUM_57980S_25		0x1656
  56#define CHIP_NUM_57980S_IOV		0x1664
  57#define CHIP_NUM_AH			0x8070
  58#define CHIP_NUM_AH_IOV			0x8090
  59
  60#ifndef PCI_DEVICE_ID_NX2_57980E
  61#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
  62#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
  63#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
  64#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
  65#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
  66#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
  67#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
  68#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
  69#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
  70
  71#endif
  72
  73enum qede_pci_private {
  74	QEDE_PRIVATE_PF,
  75	QEDE_PRIVATE_VF
  76};
  77
  78static const struct pci_device_id qede_pci_tbl[] = {
  79	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
  80	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
  81	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
  82	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
  83	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
  84	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
  85#ifdef CONFIG_QED_SRIOV
  86	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
  87#endif
  88	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
  89#ifdef CONFIG_QED_SRIOV
  90	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
  91#endif
  92	{ 0 }
  93};
  94
  95MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
  96
  97static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  98static pci_ers_result_t
  99qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
 100
 101#define TX_TIMEOUT		(5 * HZ)
 102
 103/* Utilize last protocol index for XDP */
 104#define XDP_PI	11
 105
 106static void qede_remove(struct pci_dev *pdev);
 107static void qede_shutdown(struct pci_dev *pdev);
 108static void qede_link_update(void *dev, struct qed_link_output *link);
 109static void qede_schedule_recovery_handler(void *dev);
 110static void qede_recovery_handler(struct qede_dev *edev);
 111static void qede_schedule_hw_err_handler(void *dev,
 112					 enum qed_hw_err_type err_type);
 113static void qede_get_eth_tlv_data(void *edev, void *data);
 114static void qede_get_generic_tlv_data(void *edev,
 115				      struct qed_generic_tlvs *data);
 116static void qede_generic_hw_err_handler(struct qede_dev *edev);
 117#ifdef CONFIG_QED_SRIOV
 118static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
 119			    __be16 vlan_proto)
 120{
 121	struct qede_dev *edev = netdev_priv(ndev);
 122
 123	if (vlan > 4095) {
 124		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
 125		return -EINVAL;
 126	}
 127
 128	if (vlan_proto != htons(ETH_P_8021Q))
 129		return -EPROTONOSUPPORT;
 130
 131	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
 132		   vlan, vf);
 133
 134	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
 135}
 136
 137static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
 138{
 139	struct qede_dev *edev = netdev_priv(ndev);
 140
 141	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
 142
 143	if (!is_valid_ether_addr(mac)) {
 144		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
 145		return -EINVAL;
 146	}
 147
 148	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
 149}
 150
 151static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 152{
 153	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
 154	struct qed_dev_info *qed_info = &edev->dev_info.common;
 155	struct qed_update_vport_params *vport_params;
 156	int rc;
 157
 158	vport_params = vzalloc(sizeof(*vport_params));
 159	if (!vport_params)
 160		return -ENOMEM;
 161	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
 162
 163	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
 164
 165	/* Enable/Disable Tx switching for PF */
 166	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
 167	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
 168		vport_params->vport_id = 0;
 169		vport_params->update_tx_switching_flg = 1;
 170		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
 171		edev->ops->vport_update(edev->cdev, vport_params);
 172	}
 173
 174	vfree(vport_params);
 175	return rc;
 176}
 177#endif
 178
 179static int __maybe_unused qede_suspend(struct device *dev)
 180{
 181	dev_info(dev, "Device does not support suspend operation\n");
 182
 183	return -EOPNOTSUPP;
 184}
 185
 186static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
 187
 188static const struct pci_error_handlers qede_err_handler = {
 189	.error_detected = qede_io_error_detected,
 190};
 191
 192static struct pci_driver qede_pci_driver = {
 193	.name = "qede",
 194	.id_table = qede_pci_tbl,
 195	.probe = qede_probe,
 196	.remove = qede_remove,
 197	.shutdown = qede_shutdown,
 198#ifdef CONFIG_QED_SRIOV
 199	.sriov_configure = qede_sriov_configure,
 200#endif
 201	.err_handler = &qede_err_handler,
 202	.driver.pm = &qede_pm_ops,
 203};
 204
 205static struct qed_eth_cb_ops qede_ll_ops = {
 206	{
 207#ifdef CONFIG_RFS_ACCEL
 208		.arfs_filter_op = qede_arfs_filter_op,
 209#endif
 210		.link_update = qede_link_update,
 211		.schedule_recovery_handler = qede_schedule_recovery_handler,
 212		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
 213		.get_generic_tlv_data = qede_get_generic_tlv_data,
 214		.get_protocol_tlv_data = qede_get_eth_tlv_data,
 215	},
 216	.force_mac = qede_force_mac,
 217	.ports_update = qede_udp_ports_update,
 218};
 219
 220static int qede_netdev_event(struct notifier_block *this, unsigned long event,
 221			     void *ptr)
 222{
 223	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
 224	struct ethtool_drvinfo drvinfo;
 225	struct qede_dev *edev;
 226
 227	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
 228		goto done;
 229
 230	/* Check whether this is a qede device */
 231	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
 232		goto done;
 233
 234	memset(&drvinfo, 0, sizeof(drvinfo));
 235	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
 236	if (strcmp(drvinfo.driver, "qede"))
 237		goto done;
 238	edev = netdev_priv(ndev);
 239
 240	switch (event) {
 241	case NETDEV_CHANGENAME:
 242		/* Notify qed of the name change */
 243		if (!edev->ops || !edev->ops->common)
 244			goto done;
 245		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
 246		break;
 247	case NETDEV_CHANGEADDR:
 248		edev = netdev_priv(ndev);
 249		qede_rdma_event_changeaddr(edev);
 250		break;
 251	}
 252
 253done:
 254	return NOTIFY_DONE;
 255}
 256
 257static struct notifier_block qede_netdev_notifier = {
 258	.notifier_call = qede_netdev_event,
 259};
 260
 261static
 262int __init qede_init(void)
 263{
 264	int ret;
 265
 266	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
 267
 268	qede_forced_speed_maps_init();
 269
 270	qed_ops = qed_get_eth_ops();
 271	if (!qed_ops) {
 272		pr_notice("Failed to get qed ethtool operations\n");
 273		return -EINVAL;
 274	}
 275
 276	/* Must register notifier before pci ops, since we might miss an
 277	 * interface rename after pci probe and netdev registration.
 278	 */
 279	ret = register_netdevice_notifier(&qede_netdev_notifier);
 280	if (ret) {
 281		pr_notice("Failed to register netdevice_notifier\n");
 282		qed_put_eth_ops();
 283		return -EINVAL;
 284	}
 285
 286	ret = pci_register_driver(&qede_pci_driver);
 287	if (ret) {
 288		pr_notice("Failed to register driver\n");
 289		unregister_netdevice_notifier(&qede_netdev_notifier);
 290		qed_put_eth_ops();
 291		return -EINVAL;
 292	}
 293
 294	return 0;
 295}
 296
 297static void __exit qede_cleanup(void)
 298{
 299	if (debug & QED_LOG_INFO_MASK)
 300		pr_info("qede_cleanup called\n");
 301
 302	unregister_netdevice_notifier(&qede_netdev_notifier);
 303	pci_unregister_driver(&qede_pci_driver);
 304	qed_put_eth_ops();
 305}
 306
 307module_init(qede_init);
 308module_exit(qede_cleanup);
 309
 310static int qede_open(struct net_device *ndev);
 311static int qede_close(struct net_device *ndev);
 312
 313void qede_fill_by_demand_stats(struct qede_dev *edev)
 314{
 315	struct qede_stats_common *p_common = &edev->stats.common;
 316	struct qed_eth_stats stats;
 317
 318	edev->ops->get_vport_stats(edev->cdev, &stats);
 319
 320	spin_lock(&edev->stats_lock);
 321
 322	p_common->no_buff_discards = stats.common.no_buff_discards;
 323	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
 324	p_common->ttl0_discard = stats.common.ttl0_discard;
 325	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
 326	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
 327	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
 328	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
 329	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
 330	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
 331	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
 332	p_common->mac_filter_discards = stats.common.mac_filter_discards;
 333	p_common->gft_filter_drop = stats.common.gft_filter_drop;
 334
 335	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
 336	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
 337	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
 338	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
 339	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
 340	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
 341	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
 342	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
 343	p_common->coalesced_events = stats.common.tpa_coalesced_events;
 344	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
 345	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
 346	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
 347
 348	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
 349	p_common->rx_65_to_127_byte_packets =
 350	    stats.common.rx_65_to_127_byte_packets;
 351	p_common->rx_128_to_255_byte_packets =
 352	    stats.common.rx_128_to_255_byte_packets;
 353	p_common->rx_256_to_511_byte_packets =
 354	    stats.common.rx_256_to_511_byte_packets;
 355	p_common->rx_512_to_1023_byte_packets =
 356	    stats.common.rx_512_to_1023_byte_packets;
 357	p_common->rx_1024_to_1518_byte_packets =
 358	    stats.common.rx_1024_to_1518_byte_packets;
 359	p_common->rx_crc_errors = stats.common.rx_crc_errors;
 360	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
 361	p_common->rx_pause_frames = stats.common.rx_pause_frames;
 362	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
 363	p_common->rx_align_errors = stats.common.rx_align_errors;
 364	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
 365	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
 366	p_common->rx_jabbers = stats.common.rx_jabbers;
 367	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
 368	p_common->rx_fragments = stats.common.rx_fragments;
 369	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
 370	p_common->tx_65_to_127_byte_packets =
 371	    stats.common.tx_65_to_127_byte_packets;
 372	p_common->tx_128_to_255_byte_packets =
 373	    stats.common.tx_128_to_255_byte_packets;
 374	p_common->tx_256_to_511_byte_packets =
 375	    stats.common.tx_256_to_511_byte_packets;
 376	p_common->tx_512_to_1023_byte_packets =
 377	    stats.common.tx_512_to_1023_byte_packets;
 378	p_common->tx_1024_to_1518_byte_packets =
 379	    stats.common.tx_1024_to_1518_byte_packets;
 380	p_common->tx_pause_frames = stats.common.tx_pause_frames;
 381	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
 382	p_common->brb_truncates = stats.common.brb_truncates;
 383	p_common->brb_discards = stats.common.brb_discards;
 384	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
 385	p_common->link_change_count = stats.common.link_change_count;
 386	p_common->ptp_skip_txts = edev->ptp_skip_txts;
 387
 388	if (QEDE_IS_BB(edev)) {
 389		struct qede_stats_bb *p_bb = &edev->stats.bb;
 390
 391		p_bb->rx_1519_to_1522_byte_packets =
 392		    stats.bb.rx_1519_to_1522_byte_packets;
 393		p_bb->rx_1519_to_2047_byte_packets =
 394		    stats.bb.rx_1519_to_2047_byte_packets;
 395		p_bb->rx_2048_to_4095_byte_packets =
 396		    stats.bb.rx_2048_to_4095_byte_packets;
 397		p_bb->rx_4096_to_9216_byte_packets =
 398		    stats.bb.rx_4096_to_9216_byte_packets;
 399		p_bb->rx_9217_to_16383_byte_packets =
 400		    stats.bb.rx_9217_to_16383_byte_packets;
 401		p_bb->tx_1519_to_2047_byte_packets =
 402		    stats.bb.tx_1519_to_2047_byte_packets;
 403		p_bb->tx_2048_to_4095_byte_packets =
 404		    stats.bb.tx_2048_to_4095_byte_packets;
 405		p_bb->tx_4096_to_9216_byte_packets =
 406		    stats.bb.tx_4096_to_9216_byte_packets;
 407		p_bb->tx_9217_to_16383_byte_packets =
 408		    stats.bb.tx_9217_to_16383_byte_packets;
 409		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
 410		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
 411	} else {
 412		struct qede_stats_ah *p_ah = &edev->stats.ah;
 413
 414		p_ah->rx_1519_to_max_byte_packets =
 415		    stats.ah.rx_1519_to_max_byte_packets;
 416		p_ah->tx_1519_to_max_byte_packets =
 417		    stats.ah.tx_1519_to_max_byte_packets;
 418	}
 419
 420	spin_unlock(&edev->stats_lock);
 421}
 422
 423static void qede_get_stats64(struct net_device *dev,
 424			     struct rtnl_link_stats64 *stats)
 425{
 426	struct qede_dev *edev = netdev_priv(dev);
 427	struct qede_stats_common *p_common;
 428
 429	p_common = &edev->stats.common;
 430
 431	spin_lock(&edev->stats_lock);
 432
 433	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
 434			    p_common->rx_bcast_pkts;
 435	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
 436			    p_common->tx_bcast_pkts;
 437
 438	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
 439			  p_common->rx_bcast_bytes;
 440	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
 441			  p_common->tx_bcast_bytes;
 442
 443	stats->tx_errors = p_common->tx_err_drop_pkts;
 444	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
 445
 446	stats->rx_fifo_errors = p_common->no_buff_discards;
 447
 448	if (QEDE_IS_BB(edev))
 449		stats->collisions = edev->stats.bb.tx_total_collisions;
 450	stats->rx_crc_errors = p_common->rx_crc_errors;
 451	stats->rx_frame_errors = p_common->rx_align_errors;
 452
 453	spin_unlock(&edev->stats_lock);
 454}
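
/* Note: qede_get_stats64() only reads the snapshot cached by
 * qede_fill_by_demand_stats() under stats_lock; the snapshot itself is
 * refreshed by the periodic task (see qede_periodic_task() below), so
 * this ndo never has to touch the device directly.
 */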
 455
 456#ifdef CONFIG_QED_SRIOV
 457static int qede_get_vf_config(struct net_device *dev, int vfidx,
 458			      struct ifla_vf_info *ivi)
 459{
 460	struct qede_dev *edev = netdev_priv(dev);
 461
 462	if (!edev->ops)
 463		return -EINVAL;
 464
 465	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
 466}
 467
 468static int qede_set_vf_rate(struct net_device *dev, int vfidx,
 469			    int min_tx_rate, int max_tx_rate)
 470{
 471	struct qede_dev *edev = netdev_priv(dev);
 472
 473	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
 474					max_tx_rate);
 475}
 476
 477static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
 478{
 479	struct qede_dev *edev = netdev_priv(dev);
 480
 481	if (!edev->ops)
 482		return -EINVAL;
 483
 484	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
 485}
 486
 487static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
 488				  int link_state)
 489{
 490	struct qede_dev *edev = netdev_priv(dev);
 491
 492	if (!edev->ops)
 493		return -EINVAL;
 494
 495	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
 496}
 497
 498static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
 499{
 500	struct qede_dev *edev = netdev_priv(dev);
 501
 502	if (!edev->ops)
 503		return -EINVAL;
 504
 505	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
 506}
 507#endif
 508
 509static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 510{
 511	struct qede_dev *edev = netdev_priv(dev);
 512
 513	if (!netif_running(dev))
 514		return -EAGAIN;
 515
 516	switch (cmd) {
 517	case SIOCSHWTSTAMP:
 518		return qede_ptp_hw_ts(edev, ifr);
 519	default:
 520		DP_VERBOSE(edev, QED_MSG_DEBUG,
 521			   "default IOCTL cmd 0x%x\n", cmd);
 522		return -EOPNOTSUPP;
 523	}
 524
 525	return 0;
 526}
 527
 528static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
 529{
 530	char *p_sb = (char *)fp->sb_info->sb_virt;
 531	u32 sb_size, i;
 532
 533	sb_size = sizeof(struct status_block);
 534
 535	for (i = 0; i < sb_size; i += 8)
 536		DP_NOTICE(edev,
 537			  "%02hhX %02hhX %02hhX %02hhX  %02hhX %02hhX %02hhX %02hhX\n",
 538			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
 539			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
 540}
 541
 542static void
 543qede_txq_fp_log_metadata(struct qede_dev *edev,
 544			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
 545{
 546	struct qed_chain *p_chain = &txq->tx_pbl;
 547
 548	/* Dump txq/fp/sb ids and other metadata */
 549	DP_NOTICE(edev,
 550		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
 551		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
 552		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
 553
 554	/* Dump all the relevant prod/cons indexes */
 555	DP_NOTICE(edev,
 556		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
 557		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
 558		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
 559}
 560
 561static void
 562qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
 563{
 564	struct qed_sb_info_dbg sb_dbg;
 565	int rc;
 566
 567	/* sb info */
 568	qede_fp_sb_dump(edev, fp);
 569
 570	memset(&sb_dbg, 0, sizeof(sb_dbg));
 571	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
 572
 573	DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
 574		  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
 575
 576	/* report to mfw */
 577	edev->ops->common->mfw_report(edev->cdev,
 578				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
 579				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
 580				      qed_chain_get_cons_idx(&txq->tx_pbl),
 581				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
 582	if (!rc)
 583		edev->ops->common->mfw_report(edev->cdev,
 584					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
 585					      txq->index, fp->sb_info->igu_sb_id,
 586					      sb_dbg.igu_prod, sb_dbg.igu_cons,
 587					      sb_dbg.pi[TX_PI(txq->cos)]);
 588}
 589
 590static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
 591{
 592	struct qede_dev *edev = netdev_priv(dev);
 593	int i;
 594
 595	netif_carrier_off(dev);
 596	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
 597
 598	for_each_queue(i) {
 599		struct qede_tx_queue *txq;
 600		struct qede_fastpath *fp;
 601		int cos;
 602
 603		fp = &edev->fp_array[i];
 604		if (!(fp->type & QEDE_FASTPATH_TX))
 605			continue;
 606
 607		for_each_cos_in_txq(edev, cos) {
 608			txq = &fp->txq[cos];
 609
 610			/* Dump basic metadata for all queues */
 611			qede_txq_fp_log_metadata(edev, fp, txq);
 612
 613			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
 614			    qed_chain_get_prod_idx(&txq->tx_pbl))
 615				qede_tx_log_print(edev, fp, txq);
 616		}
 617	}
 618
 619	if (IS_VF(edev))
 620		return;
 621
 622	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
 623	    edev->state == QEDE_STATE_RECOVERY) {
 624		DP_INFO(edev,
 625			"Avoid handling a Tx timeout while another HW error is being handled\n");
 626		return;
 627	}
 628
 629	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
 630	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
 631	schedule_delayed_work(&edev->sp_task, 0);
 632}
 633
 634static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
 635{
 636	struct qede_dev *edev = netdev_priv(ndev);
 637	int cos, count, offset;
 638
 639	if (num_tc > edev->dev_info.num_tc)
 640		return -EINVAL;
 641
 642	netdev_reset_tc(ndev);
 643	netdev_set_num_tc(ndev, num_tc);
 644
 645	for_each_cos_in_txq(edev, cos) {
 646		count = QEDE_TSS_COUNT(edev);
 647		offset = cos * QEDE_TSS_COUNT(edev);
 648		netdev_set_tc_queue(ndev, cos, count, offset);
 649	}
 650
 651	return 0;
 652}
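
/* A minimal usage sketch: qede_setup_tc() is reached through the mqprio
 * qdisc (see qede_setup_tc_offload() below). Each traffic class 'cos' is
 * backed by QEDE_TSS_COUNT(edev) queues starting at offset
 * cos * QEDE_TSS_COUNT(edev). From userspace, assuming the device reports
 * dev_info.num_tc >= 2, something like:
 *
 *   tc qdisc add dev <ifname> root mqprio num_tc 2 map 0 1 hw 1
 *
 * would request two offloaded traffic classes.
 */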
 653
 654static int
 655qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
 656		__be16 proto)
 657{
 658	switch (f->command) {
 659	case FLOW_CLS_REPLACE:
 660		return qede_add_tc_flower_fltr(edev, proto, f);
 661	case FLOW_CLS_DESTROY:
 662		return qede_delete_flow_filter(edev, f->cookie);
 663	default:
 664		return -EOPNOTSUPP;
 665	}
 666}
 667
 668static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 669				  void *cb_priv)
 670{
 671	struct flow_cls_offload *f;
 672	struct qede_dev *edev = cb_priv;
 673
 674	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
 675		return -EOPNOTSUPP;
 676
 677	switch (type) {
 678	case TC_SETUP_CLSFLOWER:
 679		f = type_data;
 680		return qede_set_flower(edev, f, f->common.protocol);
 681	default:
 682		return -EOPNOTSUPP;
 683	}
 684}
 685
 686static LIST_HEAD(qede_block_cb_list);
 687
 688static int
 689qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
 690		      void *type_data)
 691{
 692	struct qede_dev *edev = netdev_priv(dev);
 693	struct tc_mqprio_qopt *mqprio;
 694
 695	switch (type) {
 696	case TC_SETUP_BLOCK:
 697		return flow_block_cb_setup_simple(type_data,
 698						  &qede_block_cb_list,
 699						  qede_setup_tc_block_cb,
 700						  edev, edev, true);
 701	case TC_SETUP_QDISC_MQPRIO:
 702		mqprio = type_data;
 703
 704		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
 705		return qede_setup_tc(dev, mqprio->num_tc);
 706	default:
 707		return -EOPNOTSUPP;
 708	}
 709}
 710
 711static const struct net_device_ops qede_netdev_ops = {
 712	.ndo_open		= qede_open,
 713	.ndo_stop		= qede_close,
 714	.ndo_start_xmit		= qede_start_xmit,
 715	.ndo_select_queue	= qede_select_queue,
 716	.ndo_set_rx_mode	= qede_set_rx_mode,
 717	.ndo_set_mac_address	= qede_set_mac_addr,
 718	.ndo_validate_addr	= eth_validate_addr,
 719	.ndo_change_mtu		= qede_change_mtu,
 720	.ndo_eth_ioctl		= qede_ioctl,
 721	.ndo_tx_timeout		= qede_tx_timeout,
 722#ifdef CONFIG_QED_SRIOV
 723	.ndo_set_vf_mac		= qede_set_vf_mac,
 724	.ndo_set_vf_vlan	= qede_set_vf_vlan,
 725	.ndo_set_vf_trust	= qede_set_vf_trust,
 726#endif
 727	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
 728	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
 729	.ndo_fix_features	= qede_fix_features,
 730	.ndo_set_features	= qede_set_features,
 731	.ndo_get_stats64	= qede_get_stats64,
 732#ifdef CONFIG_QED_SRIOV
 733	.ndo_set_vf_link_state	= qede_set_vf_link_state,
 734	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
 735	.ndo_get_vf_config	= qede_get_vf_config,
 736	.ndo_set_vf_rate	= qede_set_vf_rate,
 737#endif
 738	.ndo_features_check	= qede_features_check,
 739	.ndo_bpf		= qede_xdp,
 740#ifdef CONFIG_RFS_ACCEL
 741	.ndo_rx_flow_steer	= qede_rx_flow_steer,
 742#endif
 743	.ndo_xdp_xmit		= qede_xdp_transmit,
 744	.ndo_setup_tc		= qede_setup_tc_offload,
 745};
 746
 747static const struct net_device_ops qede_netdev_vf_ops = {
 748	.ndo_open		= qede_open,
 749	.ndo_stop		= qede_close,
 750	.ndo_start_xmit		= qede_start_xmit,
 751	.ndo_select_queue	= qede_select_queue,
 752	.ndo_set_rx_mode	= qede_set_rx_mode,
 753	.ndo_set_mac_address	= qede_set_mac_addr,
 754	.ndo_validate_addr	= eth_validate_addr,
 755	.ndo_change_mtu		= qede_change_mtu,
 756	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
 757	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
 758	.ndo_fix_features	= qede_fix_features,
 759	.ndo_set_features	= qede_set_features,
 760	.ndo_get_stats64	= qede_get_stats64,
 761	.ndo_features_check	= qede_features_check,
 762};
 763
 764static const struct net_device_ops qede_netdev_vf_xdp_ops = {
 765	.ndo_open		= qede_open,
 766	.ndo_stop		= qede_close,
 767	.ndo_start_xmit		= qede_start_xmit,
 768	.ndo_select_queue	= qede_select_queue,
 769	.ndo_set_rx_mode	= qede_set_rx_mode,
 770	.ndo_set_mac_address	= qede_set_mac_addr,
 771	.ndo_validate_addr	= eth_validate_addr,
 772	.ndo_change_mtu		= qede_change_mtu,
 773	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
 774	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
 775	.ndo_fix_features	= qede_fix_features,
 776	.ndo_set_features	= qede_set_features,
 777	.ndo_get_stats64	= qede_get_stats64,
 778	.ndo_features_check	= qede_features_check,
 779	.ndo_bpf		= qede_xdp,
 780	.ndo_xdp_xmit		= qede_xdp_transmit,
 781};
 782
 783/* -------------------------------------------------------------------------
 784 * START OF PROBE / REMOVE
 785 * -------------------------------------------------------------------------
 786 */
 787
 788static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
 789					    struct pci_dev *pdev,
 790					    struct qed_dev_eth_info *info,
 791					    u32 dp_module, u8 dp_level)
 792{
 793	struct net_device *ndev;
 794	struct qede_dev *edev;
 795
 796	ndev = alloc_etherdev_mqs(sizeof(*edev),
 797				  info->num_queues * info->num_tc,
 798				  info->num_queues);
 799	if (!ndev) {
 800		pr_err("etherdev allocation failed\n");
 801		return NULL;
 802	}
 803
 804	edev = netdev_priv(ndev);
 805	edev->ndev = ndev;
 806	edev->cdev = cdev;
 807	edev->pdev = pdev;
 808	edev->dp_module = dp_module;
 809	edev->dp_level = dp_level;
 810	edev->ops = qed_ops;
 811
 812	if (is_kdump_kernel()) {
 813		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
 814		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
 815	} else {
 816		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
 817		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 818	}
 819
 820	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
 821		info->num_queues, info->num_queues);
 822
 823	SET_NETDEV_DEV(ndev, &pdev->dev);
 824
 825	memset(&edev->stats, 0, sizeof(edev->stats));
 826	memcpy(&edev->dev_info, info, sizeof(*info));
 827
 828	/* As ethtool doesn't have the ability to show WoL behavior as
 829	 * 'default', declare it enabled if the device supports it.
 830	 */
 831	if (edev->dev_info.common.wol_support)
 832		edev->wol_enabled = true;
 833
 834	INIT_LIST_HEAD(&edev->vlan_list);
 835
 836	return edev;
 837}
 838
 839static void qede_init_ndev(struct qede_dev *edev)
 840{
 841	struct net_device *ndev = edev->ndev;
 842	struct pci_dev *pdev = edev->pdev;
 843	bool udp_tunnel_enable = false;
 844	netdev_features_t hw_features;
 845
 846	pci_set_drvdata(pdev, ndev);
 847
 848	ndev->mem_start = edev->dev_info.common.pci_mem_start;
 849	ndev->base_addr = ndev->mem_start;
 850	ndev->mem_end = edev->dev_info.common.pci_mem_end;
 851	ndev->irq = edev->dev_info.common.pci_irq;
 852
 853	ndev->watchdog_timeo = TX_TIMEOUT;
 854
 855	if (IS_VF(edev)) {
 856		if (edev->dev_info.xdp_supported)
 857			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
 858		else
 859			ndev->netdev_ops = &qede_netdev_vf_ops;
 860	} else {
 861		ndev->netdev_ops = &qede_netdev_ops;
 862	}
 863
 864	qede_set_ethtool_ops(ndev);
 865
 866	ndev->priv_flags |= IFF_UNICAST_FLT;
 867
 868	/* user-changeable features */
 869	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
 870		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 871		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
 872
 873	if (edev->dev_info.common.b_arfs_capable)
 874		hw_features |= NETIF_F_NTUPLE;
 875
 876	if (edev->dev_info.common.vxlan_enable ||
 877	    edev->dev_info.common.geneve_enable)
 878		udp_tunnel_enable = true;
 879
 880	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
 881		hw_features |= NETIF_F_TSO_ECN;
 882		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 883					NETIF_F_SG | NETIF_F_TSO |
 884					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 885					NETIF_F_RXCSUM;
 886	}
 887
 888	if (udp_tunnel_enable) {
 889		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
 890				NETIF_F_GSO_UDP_TUNNEL_CSUM);
 891		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
 892					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
 893
 894		qede_set_udp_tunnels(edev);
 895	}
 896
 897	if (edev->dev_info.common.gre_enable) {
 898		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
 899		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
 900					  NETIF_F_GSO_GRE_CSUM);
 901	}
 902
 903	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 904			      NETIF_F_HIGHDMA;
 905	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 906			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
 907			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
 908
 909	ndev->hw_features = hw_features;
 910
 911	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
 912			     NETDEV_XDP_ACT_NDO_XMIT;
 913
 914	/* MTU range: 46 - 9600 */
 915	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
 916	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
 917
 918	/* Set network device HW mac */
 919	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
 920
 921	ndev->mtu = edev->dev_info.common.mtu;
 922}
 923
 924/* This function converts the 32-bit 'debug' param into separate level and
 925 * module parameters. Input 32b decoding:
 926 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 927 * 'happy' flow, e.g. memory allocation failed.
 928 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 929 * and provide important parameters.
 930 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 931 * module. VERBOSE prints are for tracking the specific flow at a low level.
 932 *
 933 * Notice that the level should be that of the lowest required logs.
 934 */
 935void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
 936{
 937	*p_dp_level = QED_LEVEL_NOTICE;
 938	*p_dp_module = 0;
 939
 940	if (debug & QED_LOG_VERBOSE_MASK) {
 941		*p_dp_level = QED_LEVEL_VERBOSE;
 942		*p_dp_module = (debug & 0x3FFFFFFF);
 943	} else if (debug & QED_LOG_INFO_MASK) {
 944		*p_dp_level = QED_LEVEL_INFO;
 945	} else if (debug & QED_LOG_NOTICE_MASK) {
 946		*p_dp_level = QED_LEVEL_NOTICE;
 947	}
 948}
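
/* Illustrative decodings, assuming the QED_LOG_*_MASK values follow the
 * bit layout documented above (b31 NOTICE, b30 INFO, b29-b0 VERBOSE):
 *
 *   debug=0x40000000 -> dp_level = QED_LEVEL_INFO,    dp_module = 0
 *   debug=0x00000003 -> dp_level = QED_LEVEL_VERBOSE, dp_module = 0x3
 *
 * e.g. "modprobe qede debug=0x3" enables VERBOSE prints for the two
 * lowest module bits.
 */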
 949
 950static void qede_free_fp_array(struct qede_dev *edev)
 951{
 952	if (edev->fp_array) {
 953		struct qede_fastpath *fp;
 954		int i;
 955
 956		for_each_queue(i) {
 957			fp = &edev->fp_array[i];
 958
 959			kfree(fp->sb_info);
 960			/* Handle the mem alloc failure case where qede_init_fp
 961			 * didn't register xdp_rxq_info yet; this implicitly
 962			 * applies only when (fp->type & QEDE_FASTPATH_RX).
 963			 */
 964			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
 965				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
 966			kfree(fp->rxq);
 967			kfree(fp->xdp_tx);
 968			kfree(fp->txq);
 969		}
 970		kfree(edev->fp_array);
 971	}
 972
 973	edev->num_queues = 0;
 974	edev->fp_num_tx = 0;
 975	edev->fp_num_rx = 0;
 976}
 977
 978static int qede_alloc_fp_array(struct qede_dev *edev)
 979{
 980	u8 fp_combined, fp_rx = edev->fp_num_rx;
 981	struct qede_fastpath *fp;
 982	int i;
 983
 984	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
 985				 sizeof(*edev->fp_array), GFP_KERNEL);
 986	if (!edev->fp_array) {
 987		DP_NOTICE(edev, "fp array allocation failed\n");
 988		goto err;
 989	}
 990
 991	if (!edev->coal_entry) {
 992		edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
 993					   sizeof(*edev->coal_entry),
 994					   GFP_KERNEL);
 995		if (!edev->coal_entry) {
 996			DP_ERR(edev, "coalesce entry allocation failed\n");
 997			goto err;
 998		}
 999	}
1000
1001	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
1002
1003	/* Allocate the FP elements for Rx queues followed by combined and then
1004	 * the Tx. This ordering should be maintained so that the respective
1005	 * queues (Rx or Tx) will be together in the fastpath array and the
1006	 * associated ids will be sequential.
1007	 */
1008	for_each_queue(i) {
1009		fp = &edev->fp_array[i];
1010
1011		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
1012		if (!fp->sb_info) {
1013			DP_NOTICE(edev, "sb info struct allocation failed\n");
1014			goto err;
1015		}
1016
1017		if (fp_rx) {
1018			fp->type = QEDE_FASTPATH_RX;
1019			fp_rx--;
1020		} else if (fp_combined) {
1021			fp->type = QEDE_FASTPATH_COMBINED;
1022			fp_combined--;
1023		} else {
1024			fp->type = QEDE_FASTPATH_TX;
1025		}
1026
1027		if (fp->type & QEDE_FASTPATH_TX) {
1028			fp->txq = kcalloc(edev->dev_info.num_tc,
1029					  sizeof(*fp->txq), GFP_KERNEL);
1030			if (!fp->txq)
1031				goto err;
1032		}
1033
1034		if (fp->type & QEDE_FASTPATH_RX) {
1035			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
1036			if (!fp->rxq)
1037				goto err;
1038
1039			if (edev->xdp_prog) {
1040				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
1041						     GFP_KERNEL);
1042				if (!fp->xdp_tx)
1043					goto err;
1044				fp->type |= QEDE_FASTPATH_XDP;
1045			}
1046		}
1047	}
1048
1049	return 0;
1050err:
1051	qede_free_fp_array(edev);
1052	return -ENOMEM;
1053}
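
/* A worked example of the ordering above: with fp_num_rx = 2,
 * fp_num_tx = 1 and QEDE_QUEUE_CNT() = 5, fp_combined = 2 and the loop
 * yields fp_array = { RX, RX, COMBINED, COMBINED, TX }, so the
 * Rx-capable entries (indices 0-3) and Tx-capable entries (indices 2-4)
 * are each contiguous and their queue ids come out sequential.
 */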
1054
1055/* The qede lock is used to protect driver state changes and driver flows
1056 * that are not reentrant.
1057 */
1058void __qede_lock(struct qede_dev *edev)
1059{
1060	mutex_lock(&edev->qede_lock);
1061}
1062
1063void __qede_unlock(struct qede_dev *edev)
1064{
1065	mutex_unlock(&edev->qede_lock);
1066}
1067
1068/* This version of the lock should be used when acquiring the RTNL lock is also
1069 * needed in addition to the internal qede lock.
1070 */
1071static void qede_lock(struct qede_dev *edev)
1072{
1073	rtnl_lock();
1074	__qede_lock(edev);
1075}
1076
1077static void qede_unlock(struct qede_dev *edev)
1078{
1079	__qede_unlock(edev);
1080	rtnl_unlock();
1081}
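
/* Usage sketch: flows that only touch internal qede state take
 * __qede_lock()/__qede_unlock(), while flows that must also quiesce
 * netdev operations - e.g. the QEDE_SP_RECOVERY path in qede_sp_task()
 * below - use qede_lock()/qede_unlock(), giving the lock ordering
 * rtnl_lock -> qede_lock.
 */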
1082
1083static void qede_periodic_task(struct work_struct *work)
1084{
1085	struct qede_dev *edev = container_of(work, struct qede_dev,
1086					     periodic_task.work);
1087
1088	qede_fill_by_demand_stats(edev);
1089	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
1090}
1091
1092static void qede_init_periodic_task(struct qede_dev *edev)
1093{
1094	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
1095	spin_lock_init(&edev->stats_lock);
1096	edev->stats_coal_usecs = USEC_PER_SEC;
1097	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
1098}
1099
1100static void qede_sp_task(struct work_struct *work)
1101{
1102	struct qede_dev *edev = container_of(work, struct qede_dev,
1103					     sp_task.work);
1104
1105	/* Disable execution of this deferred work once qede removal
1106	 * is in progress; this stops any future scheduling of
1107	 * sp_task.
1108	 */
1109	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
1110		return;
1111
1112	/* The locking scheme depends on the specific flag:
1113	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
1114	 * ensure that ongoing flows are ended and new ones are not started.
1115	 * In other cases - only the internal qede lock should be acquired.
1116	 */
1117
1118	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
1119		cancel_delayed_work_sync(&edev->periodic_task);
1120#ifdef CONFIG_QED_SRIOV
1121		/* SRIOV must be disabled outside the lock to avoid a deadlock.
1122		 * The recovery of the active VFs is currently not supported.
1123		 */
1124		if (pci_num_vf(edev->pdev))
1125			qede_sriov_configure(edev->pdev, 0);
1126#endif
1127		qede_lock(edev);
1128		qede_recovery_handler(edev);
1129		qede_unlock(edev);
1130	}
1131
1132	__qede_lock(edev);
1133
1134	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
1135		if (edev->state == QEDE_STATE_OPEN)
1136			qede_config_rx_mode(edev->ndev);
1137
1138#ifdef CONFIG_RFS_ACCEL
1139	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
1140		if (edev->state == QEDE_STATE_OPEN)
1141			qede_process_arfs_filters(edev, false);
1142	}
1143#endif
1144	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
1145		qede_generic_hw_err_handler(edev);
1146	__qede_unlock(edev);
1147
1148	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
1149#ifdef CONFIG_QED_SRIOV
1150		/* SRIOV must be disabled outside the lock to avoid a deadlock.
1151		 * The recovery of the active VFs is currently not supported.
1152		 */
1153		if (pci_num_vf(edev->pdev))
1154			qede_sriov_configure(edev->pdev, 0);
1155#endif
1156		edev->ops->common->recovery_process(edev->cdev);
1157	}
1158}
1159
1160static void qede_update_pf_params(struct qed_dev *cdev)
1161{
1162	struct qed_pf_params pf_params;
1163	u16 num_cons;
1164
1165	/* 64 rx + 64 tx + 64 XDP */
1166	memset(&pf_params, 0, sizeof(struct qed_pf_params));
1167
1168	/* 1 rx + 1 xdp + max tx cos */
1169	num_cons = QED_MIN_L2_CONS;
1170
1171	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
1172
1173	/* Same for VFs - make sure they'll have sufficient connections
1174	 * to support XDP Tx queues.
1175	 */
1176	pf_params.eth_pf_params.num_vf_cons = 48;
1177
1178	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
1179	qed_ops->common->update_pf_params(cdev, &pf_params);
1180}
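
/* Per the comments above, each L2 queue set needs QED_MIN_L2_CONS
 * connections (1 Rx + 1 XDP + the maximum number of Tx classes), and
 * one such bundle is reserved per usable status block, hence
 * (MAX_SB_PER_PF_MIMD - 1) * num_cons connections for the PF.
 */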
1181
1182#define QEDE_FW_VER_STR_SIZE	80
1183
1184static void qede_log_probe(struct qede_dev *edev)
1185{
1186	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
1187	u8 buf[QEDE_FW_VER_STR_SIZE];
1188	size_t left_size;
1189
1190	snprintf(buf, QEDE_FW_VER_STR_SIZE,
1191		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
1192		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
1193		 p_dev_info->fw_eng,
1194		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
1195		 QED_MFW_VERSION_3_OFFSET,
1196		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
1197		 QED_MFW_VERSION_2_OFFSET,
1198		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
1199		 QED_MFW_VERSION_1_OFFSET,
1200		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
1201		 QED_MFW_VERSION_0_OFFSET);
1202
1203	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
1204	if (p_dev_info->mbi_version && left_size)
1205		snprintf(buf + strlen(buf), left_size,
1206			 " [MBI %d.%d.%d]",
1207			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
1208			 QED_MBI_VERSION_2_OFFSET,
1209			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
1210			 QED_MBI_VERSION_1_OFFSET,
1211			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
1212			 QED_MBI_VERSION_0_OFFSET);
1213
1214	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
1215		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
1216		buf, edev->ndev->name);
1217}
1218
1219enum qede_probe_mode {
1220	QEDE_PROBE_NORMAL,
1221	QEDE_PROBE_RECOVERY,
1222};
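
/* QEDE_PROBE_NORMAL allocates a fresh netdev and registers devlink;
 * QEDE_PROBE_RECOVERY (used by the recovery flow driven from
 * qede_sp_task()) reuses the netdev still attached to the PCI device
 * and only re-creates the qed cdev underneath it - see the two
 * branches on 'mode' in __qede_probe() below.
 */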
1223
1224static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
1225			bool is_vf, enum qede_probe_mode mode)
1226{
1227	struct qed_probe_params probe_params;
1228	struct qed_slowpath_params sp_params;
1229	struct qed_dev_eth_info dev_info;
1230	struct qede_dev *edev;
1231	struct qed_dev *cdev;
1232	int rc;
1233
1234	if (unlikely(dp_level & QED_LEVEL_INFO))
1235		pr_notice("Starting qede probe\n");
1236
1237	memset(&probe_params, 0, sizeof(probe_params));
1238	probe_params.protocol = QED_PROTOCOL_ETH;
1239	probe_params.dp_module = dp_module;
1240	probe_params.dp_level = dp_level;
1241	probe_params.is_vf = is_vf;
1242	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
1243	cdev = qed_ops->common->probe(pdev, &probe_params);
1244	if (!cdev) {
1245		rc = -ENODEV;
1246		goto err0;
1247	}
1248
1249	qede_update_pf_params(cdev);
1250
1251	/* Start the Slowpath-process */
1252	memset(&sp_params, 0, sizeof(sp_params));
1253	sp_params.int_mode = QED_INT_MODE_MSIX;
1254	strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
1255	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
1256	if (rc) {
1257		pr_notice("Cannot start slowpath\n");
1258		goto err1;
1259	}
1260
1261	/* Learn information crucial for qede to progress */
1262	rc = qed_ops->fill_dev_info(cdev, &dev_info);
1263	if (rc)
1264		goto err2;
1265
1266	if (mode != QEDE_PROBE_RECOVERY) {
1267		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
1268					   dp_level);
1269		if (!edev) {
1270			rc = -ENOMEM;
1271			goto err2;
1272		}
1273
1274		edev->devlink = qed_ops->common->devlink_register(cdev);
1275		if (IS_ERR(edev->devlink)) {
1276			DP_NOTICE(edev, "Cannot register devlink\n");
1277			rc = PTR_ERR(edev->devlink);
1278			edev->devlink = NULL;
1279			goto err3;
1280		}
1281	} else {
1282		struct net_device *ndev = pci_get_drvdata(pdev);
1283		struct qed_devlink *qdl;
1284
1285		edev = netdev_priv(ndev);
1286		qdl = devlink_priv(edev->devlink);
1287		qdl->cdev = cdev;
1288		edev->cdev = cdev;
1289		memset(&edev->stats, 0, sizeof(edev->stats));
1290		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
1291	}
1292
1293	if (is_vf)
1294		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
1295
1296	qede_init_ndev(edev);
1297
1298	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
1299	if (rc)
1300		goto err3;
1301
1302	if (mode != QEDE_PROBE_RECOVERY) {
1303		/* Prepare the lock prior to the registration of the netdev,
1304		 * as once it's registered we might reach flows requiring it
1305		 * [it's even possible to reach a flow needing it directly
1306		 * from there, although it's unlikely].
1307		 */
1308		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
1309		mutex_init(&edev->qede_lock);
1310		qede_init_periodic_task(edev);
1311
1312		rc = register_netdev(edev->ndev);
1313		if (rc) {
1314			DP_NOTICE(edev, "Cannot register net-device\n");
1315			goto err4;
1316		}
1317	}
1318
1319	edev->ops->common->set_name(cdev, edev->ndev->name);
1320
1321	/* PTP not supported on VFs */
1322	if (!is_vf)
1323		qede_ptp_enable(edev);
1324
1325	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1326
1327#ifdef CONFIG_DCB
1328	if (!IS_VF(edev))
1329		qede_set_dcbnl_ops(edev->ndev);
1330#endif
1331
1332	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
1333
1334	qede_log_probe(edev);
1335
1336	/* retain user config (for example - after recovery) */
1337	if (edev->stats_coal_usecs)
1338		schedule_delayed_work(&edev->periodic_task, 0);
1339
1340	return 0;
1341
1342err4:
1343	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
1344err3:
1345	if (mode != QEDE_PROBE_RECOVERY)
1346		free_netdev(edev->ndev);
1347	else
1348		edev->cdev = NULL;
1349err2:
1350	qed_ops->common->slowpath_stop(cdev);
1351err1:
1352	qed_ops->common->remove(cdev);
1353err0:
1354	return rc;
1355}
1356
1357static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1358{
1359	bool is_vf = false;
1360	u32 dp_module = 0;
1361	u8 dp_level = 0;
1362
1363	switch ((enum qede_pci_private)id->driver_data) {
1364	case QEDE_PRIVATE_VF:
1365		if (debug & QED_LOG_VERBOSE_MASK)
1366			dev_err(&pdev->dev, "Probing a VF\n");
1367		is_vf = true;
1368		break;
1369	default:
1370		if (debug & QED_LOG_VERBOSE_MASK)
1371			dev_err(&pdev->dev, "Probing a PF\n");
1372	}
1373
1374	qede_config_debug(debug, &dp_module, &dp_level);
1375
1376	return __qede_probe(pdev, dp_module, dp_level, is_vf,
1377			    QEDE_PROBE_NORMAL);
1378}
1379
1380enum qede_remove_mode {
1381	QEDE_REMOVE_NORMAL,
1382	QEDE_REMOVE_RECOVERY,
1383};
1384
1385static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1386{
1387	struct net_device *ndev = pci_get_drvdata(pdev);
1388	struct qede_dev *edev;
1389	struct qed_dev *cdev;
1390
1391	if (!ndev) {
1392		dev_info(&pdev->dev, "Device has already been removed\n");
1393		return;
1394	}
1395
1396	edev = netdev_priv(ndev);
1397	cdev = edev->cdev;
1398
1399	DP_INFO(edev, "Starting qede_remove\n");
1400
1401	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
1402
1403	if (mode != QEDE_REMOVE_RECOVERY) {
1404		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
1405		unregister_netdev(ndev);
1406
1407		cancel_delayed_work_sync(&edev->sp_task);
1408		cancel_delayed_work_sync(&edev->periodic_task);
1409
1410		edev->ops->common->set_power_state(cdev, PCI_D0);
1411
1412		pci_set_drvdata(pdev, NULL);
1413	}
1414
1415	qede_ptp_disable(edev);
1416
1417	/* Use global ops since we've freed edev */
1418	qed_ops->common->slowpath_stop(cdev);
1419	if (system_state == SYSTEM_POWER_OFF)
1420		return;
1421
1422	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
1423		qed_ops->common->devlink_unregister(edev->devlink);
1424		edev->devlink = NULL;
1425	}
1426	qed_ops->common->remove(cdev);
1427	edev->cdev = NULL;
1428
1429	/* Since this can happen out-of-sync with other flows,
1430	 * don't release the netdevice until after slowpath stop
1431	 * has been called to guarantee various other contexts
1432	 * [e.g., QED register callbacks] won't break anything when
1433	 * accessing the netdevice.
1434	 */
1435	if (mode != QEDE_REMOVE_RECOVERY) {
1436		kfree(edev->coal_entry);
1437		free_netdev(ndev);
1438	}
1439
1440	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1441}
1442
1443static void qede_remove(struct pci_dev *pdev)
1444{
1445	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1446}
1447
1448static void qede_shutdown(struct pci_dev *pdev)
1449{
1450	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1451}
1452
1453/* -------------------------------------------------------------------------
1454 * START OF LOAD / UNLOAD
1455 * -------------------------------------------------------------------------
1456 */
1457
1458static int qede_set_num_queues(struct qede_dev *edev)
1459{
1460	int rc;
1461	u16 rss_num;
1462
1463	/* Set up queues according to available resources */
1464	if (edev->req_queues)
1465		rss_num = edev->req_queues;
1466	else
1467		rss_num = netif_get_num_default_rss_queues() *
1468			  edev->dev_info.common.num_hwfns;
1469
1470	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1471
1472	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1473	if (rc > 0) {
1474		/* Managed to request interrupts for our queues */
1475		edev->num_queues = rc;
1476		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1477			QEDE_QUEUE_CNT(edev), rss_num);
1478		rc = 0;
1479	}
1480
1481	edev->fp_num_tx = edev->req_num_tx;
1482	edev->fp_num_rx = edev->req_num_rx;
1483
1484	return rc;
1485}
1486
1487static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
1488			     u16 sb_id)
1489{
1490	if (sb_info->sb_virt) {
1491		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
1492					      QED_SB_TYPE_L2_QUEUE);
1493		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1494				  (void *)sb_info->sb_virt, sb_info->sb_phys);
1495		memset(sb_info, 0, sizeof(*sb_info));
1496	}
1497}
1498
1499/* This function allocates fast-path status block memory */
1500static int qede_alloc_mem_sb(struct qede_dev *edev,
1501			     struct qed_sb_info *sb_info, u16 sb_id)
1502{
1503	struct status_block *sb_virt;
1504	dma_addr_t sb_phys;
1505	int rc;
1506
1507	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1508				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
1509	if (!sb_virt) {
1510		DP_ERR(edev, "Status block allocation failed\n");
1511		return -ENOMEM;
1512	}
1513
1514	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1515					sb_virt, sb_phys, sb_id,
1516					QED_SB_TYPE_L2_QUEUE);
1517	if (rc) {
1518		DP_ERR(edev, "Status block initialization failed\n");
1519		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1520				  sb_virt, sb_phys);
1521		return rc;
1522	}
1523
1524	return 0;
1525}
1526
1527static void qede_free_rx_buffers(struct qede_dev *edev,
1528				 struct qede_rx_queue *rxq)
1529{
1530	u16 i;
1531
1532	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1533		struct sw_rx_data *rx_buf;
1534		struct page *data;
1535
1536		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1537		data = rx_buf->data;
1538
1539		dma_unmap_page(&edev->pdev->dev,
1540			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1541
1542		rx_buf->data = NULL;
1543		__free_page(data);
1544	}
1545}
1546
1547static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1548{
1549	/* Free rx buffers */
1550	qede_free_rx_buffers(edev, rxq);
1551
1552	/* Free the parallel SW ring */
1553	kfree(rxq->sw_rx_ring);
1554
1555	/* Free the real RQ ring used by FW */
1556	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1557	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1558}
1559
1560static void qede_set_tpa_param(struct qede_rx_queue *rxq)
1561{
1562	int i;
1563
1564	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1565		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1566
1567		tpa_info->state = QEDE_AGG_STATE_NONE;
1568	}
1569}
1570
1571/* This function allocates all memory needed per Rx queue */
1572static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1573{
1574	struct qed_chain_init_params params = {
1575		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1576		.num_elems	= RX_RING_SIZE,
1577	};
1578	struct qed_dev *cdev = edev->cdev;
1579	int i, rc, size;
1580
1581	rxq->num_rx_buffers = edev->q_num_rx_buffers;
1582
1583	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1584
1585	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
1586	size = rxq->rx_headroom +
1587	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1588
1589	/* Make sure that the headroom and payload fit in a single page */
1590	if (rxq->rx_buf_size + size > PAGE_SIZE)
1591		rxq->rx_buf_size = PAGE_SIZE - size;
1592
1593	/* Segment size to split a page into multiple equal parts,
1594	 * unless XDP is used, in which case we use the entire page.
1595	 */
1596	if (!edev->xdp_prog) {
1597		size = size + rxq->rx_buf_size;
1598		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
1599	} else {
1600		rxq->rx_buf_seg_size = PAGE_SIZE;
1601		edev->ndev->features &= ~NETIF_F_GRO_HW;
1602	}
1603
1604	/* Allocate the parallel driver ring for Rx buffers */
1605	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1606	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1607	if (!rxq->sw_rx_ring) {
1608		DP_ERR(edev, "Rx buffers ring allocation failed\n");
1609		rc = -ENOMEM;
1610		goto err;
1611	}
1612
1613	/* Allocate FW Rx ring  */
1614	params.mode = QED_CHAIN_MODE_NEXT_PTR;
1615	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
1616	params.elem_size = sizeof(struct eth_rx_bd);
1617
1618	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
1619	if (rc)
1620		goto err;
1621
1622	/* Allocate FW completion ring */
1623	params.mode = QED_CHAIN_MODE_PBL;
1624	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
1625	params.elem_size = sizeof(union eth_rx_cqe);
1626
1627	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
1628	if (rc)
1629		goto err;
1630
1631	/* Allocate buffers for the Rx ring */
1632	rxq->filled_buffers = 0;
1633	for (i = 0; i < rxq->num_rx_buffers; i++) {
1634		rc = qede_alloc_rx_buffer(rxq, false);
1635		if (rc) {
1636			DP_ERR(edev,
1637			       "Rx buffers allocation failed at index %d\n", i);
1638			goto err;
1639		}
1640	}
1641
1642	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
1643	if (!edev->gro_disable)
1644		qede_set_tpa_param(rxq);
1645err:
1646	return rc;
1647}
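
/* A rough worked example, assuming 4K pages and the default 1500-byte
 * MTU: rx_buf_size is on the order of 1.5K, and adding the headroom
 * plus the skb_shared_info overhead stays below 2K, so
 * roundup_pow_of_two() gives rx_buf_seg_size = 2048 and each page is
 * split into two buffers. With an XDP program attached, the whole page
 * backs a single buffer and NETIF_F_GRO_HW is cleared above.
 */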
1648
1649static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1650{
1651	/* Free the parallel SW ring */
1652	if (txq->is_xdp)
1653		kfree(txq->sw_tx_ring.xdp);
1654	else
1655		kfree(txq->sw_tx_ring.skbs);
1656
1657	/* Free the real RQ ring used by FW */
1658	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1659}
1660
1661/* This function allocates all memory needed per Tx queue */
1662static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1663{
1664	struct qed_chain_init_params params = {
1665		.mode		= QED_CHAIN_MODE_PBL,
1666		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1667		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1668		.num_elems	= edev->q_num_tx_buffers,
1669		.elem_size	= sizeof(union eth_tx_bd_types),
1670	};
1671	int size, rc;
1672
1673	txq->num_tx_buffers = edev->q_num_tx_buffers;
1674
1675	/* Allocate the parallel driver ring for Tx buffers */
1676	if (txq->is_xdp) {
1677		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
1678		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1679		if (!txq->sw_tx_ring.xdp)
1680			goto err;
1681	} else {
1682		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1683		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1684		if (!txq->sw_tx_ring.skbs)
1685			goto err;
1686	}
1687
1688	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
1689	if (rc)
1690		goto err;
1691
1692	return 0;
1693
1694err:
1695	qede_free_mem_txq(edev, txq);
1696	return -ENOMEM;
1697}
1698
1699/* This function frees all memory of a single fp */
1700static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1701{
1702	qede_free_mem_sb(edev, fp->sb_info, fp->id);
1703
1704	if (fp->type & QEDE_FASTPATH_RX)
1705		qede_free_mem_rxq(edev, fp->rxq);
1706
1707	if (fp->type & QEDE_FASTPATH_XDP)
1708		qede_free_mem_txq(edev, fp->xdp_tx);
1709
1710	if (fp->type & QEDE_FASTPATH_TX) {
1711		int cos;
1712
1713		for_each_cos_in_txq(edev, cos)
1714			qede_free_mem_txq(edev, &fp->txq[cos]);
1715	}
1716}
1717
1718/* This function allocates all memory needed for a single fp (i.e. an entity
1719 * which contains a status block, one rx queue and/or multiple per-TC tx queues).
1720 */
1721static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1722{
1723	int rc = 0;
1724
1725	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1726	if (rc)
1727		goto out;
1728
1729	if (fp->type & QEDE_FASTPATH_RX) {
1730		rc = qede_alloc_mem_rxq(edev, fp->rxq);
1731		if (rc)
1732			goto out;
1733	}
1734
1735	if (fp->type & QEDE_FASTPATH_XDP) {
1736		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1737		if (rc)
1738			goto out;
1739	}
1740
1741	if (fp->type & QEDE_FASTPATH_TX) {
1742		int cos;
1743
1744		for_each_cos_in_txq(edev, cos) {
1745			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
1746			if (rc)
1747				goto out;
1748		}
1749	}
1750
1751out:
1752	return rc;
1753}
1754
1755static void qede_free_mem_load(struct qede_dev *edev)
1756{
1757	int i;
1758
1759	for_each_queue(i) {
1760		struct qede_fastpath *fp = &edev->fp_array[i];
1761
1762		qede_free_mem_fp(edev, fp);
1763	}
1764}
1765
1766/* This function allocates all qede memory at NIC load. */
1767static int qede_alloc_mem_load(struct qede_dev *edev)
1768{
1769	int rc = 0, queue_id;
1770
1771	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1772		struct qede_fastpath *fp = &edev->fp_array[queue_id];
1773
1774		rc = qede_alloc_mem_fp(edev, fp);
1775		if (rc) {
1776			DP_ERR(edev,
1777			       "Failed to allocate memory for fastpath - rss id = %d\n",
1778			       queue_id);
1779			qede_free_mem_load(edev);
1780			return rc;
1781		}
1782	}
1783
1784	return 0;
1785}
1786
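/* Walk the Tx PBL chain from consumer to producer and free every packet
 * that is still pending, updating BQL accounting via
 * netdev_tx_completed_queue(). Used on the recovery unload path, where
 * FW/HW cannot be relied upon to complete the outstanding descriptors.
 */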
1787static void qede_empty_tx_queue(struct qede_dev *edev,
1788				struct qede_tx_queue *txq)
1789{
1790	unsigned int pkts_compl = 0, bytes_compl = 0;
1791	struct netdev_queue *netdev_txq;
1792	int rc, len = 0;
1793
1794	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
1795
1796	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
1797	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
1798		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1799			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1800			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
1801			   qed_chain_get_prod_idx(&txq->tx_pbl));
1802
1803		rc = qede_free_tx_pkt(edev, txq, &len);
1804		if (rc) {
1805			DP_NOTICE(edev,
1806				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1807				  txq->index,
1808				  qed_chain_get_cons_idx(&txq->tx_pbl),
1809				  qed_chain_get_prod_idx(&txq->tx_pbl));
1810			break;
1811		}
1812
1813		bytes_compl += len;
1814		pkts_compl++;
1815		txq->sw_tx_cons++;
1816	}
1817
1818	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
1819}
1820
1821static void qede_empty_tx_queues(struct qede_dev *edev)
1822{
1823	int i;
1824
1825	for_each_queue(i)
1826		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
1827			int cos;
1828
1829			for_each_cos_in_txq(edev, cos) {
1830				struct qede_fastpath *fp;
1831
1832				fp = &edev->fp_array[i];
1833				qede_empty_tx_queue(edev,
1834						    &fp->txq[cos]);
1835			}
1836		}
1837}
1838
1839/* This function inits fp content and resets the SB, RXQ and TXQ structures */
1840static void qede_init_fp(struct qede_dev *edev)
1841{
1842	int queue_id, rxq_index = 0, txq_index = 0;
1843	struct qede_fastpath *fp;
1844	bool init_xdp = false;
1845
1846	for_each_queue(queue_id) {
1847		fp = &edev->fp_array[queue_id];
1848
1849		fp->edev = edev;
1850		fp->id = queue_id;
1851
1852		if (fp->type & QEDE_FASTPATH_XDP) {
1853			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1854								rxq_index);
1855			fp->xdp_tx->is_xdp = 1;
1856
1857			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
1858			init_xdp = true;
1859		}
1860
1861		if (fp->type & QEDE_FASTPATH_RX) {
1862			fp->rxq->rxq_id = rxq_index++;
1863
1864			/* Determine how to map buffers for this queue */
1865			if (fp->type & QEDE_FASTPATH_XDP)
1866				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1867			else
1868				fp->rxq->data_direction = DMA_FROM_DEVICE;
1869			fp->rxq->dev = &edev->pdev->dev;
1870
1871			/* Driver has no error path from here */
1872			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1873						 fp->rxq->rxq_id, 0) < 0);
1874
1875			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
1876						       MEM_TYPE_PAGE_ORDER0,
1877						       NULL)) {
1878				DP_NOTICE(edev,
1879					  "Failed to register XDP memory model\n");
1880			}
1881		}
1882
1883		if (fp->type & QEDE_FASTPATH_TX) {
1884			int cos;
1885
1886			for_each_cos_in_txq(edev, cos) {
1887				struct qede_tx_queue *txq = &fp->txq[cos];
1888				u16 ndev_tx_id;
1889
1890				txq->cos = cos;
1891				txq->index = txq_index;
1892				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
1893				txq->ndev_txq_id = ndev_tx_id;
1894
1895				if (edev->dev_info.is_legacy)
1896					txq->is_legacy = true;
1897				txq->dev = &edev->pdev->dev;
1898			}
1899
1900			txq_index++;
1901		}
1902
1903		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1904			 edev->ndev->name, queue_id);
1905	}
1906
1907	if (init_xdp) {
1908		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
1909		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
1910	}
1911}
1912
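/* Publish the effective queue counts to the net core - Tx queues scaled by
 * the number of traffic classes, Rx queues following the RSS count.
 */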
1913static int qede_set_real_num_queues(struct qede_dev *edev)
1914{
1915	int rc = 0;
1916
1917	rc = netif_set_real_num_tx_queues(edev->ndev,
1918					  QEDE_TSS_COUNT(edev) *
1919					  edev->dev_info.num_tc);
1920	if (rc) {
1921		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1922		return rc;
1923	}
1924
1925	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1926	if (rc) {
1927		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1928		return rc;
1929	}
1930
1931	return 0;
1932}
1933
1934static void qede_napi_disable_remove(struct qede_dev *edev)
1935{
1936	int i;
1937
1938	for_each_queue(i) {
1939		napi_disable(&edev->fp_array[i].napi);
1940
1941		netif_napi_del(&edev->fp_array[i].napi);
1942	}
1943}
1944
1945static void qede_napi_add_enable(struct qede_dev *edev)
1946{
1947	int i;
1948
1949	/* Add NAPI objects */
1950	for_each_queue(i) {
1951		netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
1952		napi_enable(&edev->fp_array[i].napi);
1953	}
1954}
1955
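/* Release all fastpath interrupts - the MSI-X vectors when in use, or the
 * SIMD handlers registered with qed otherwise - and reset the bookkeeping.
 */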
1956static void qede_sync_free_irqs(struct qede_dev *edev)
1957{
1958	int i;
1959
1960	for (i = 0; i < edev->int_info.used_cnt; i++) {
1961		if (edev->int_info.msix_cnt) {
1962			free_irq(edev->int_info.msix[i].vector,
1963				 &edev->fp_array[i]);
1964		} else {
1965			edev->ops->common->simd_handler_clean(edev->cdev, i);
1966		}
1967	}
1968
1969	edev->int_info.used_cnt = 0;
1970	edev->int_info.msix_cnt = 0;
1971}
1972
1973static int qede_req_msix_irqs(struct qede_dev *edev)
1974{
1975	int i, rc;
1976
1977	/* Sanitize number of interrupts == number of prepared RSS queues */
1978	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1979		DP_ERR(edev,
1980		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1981		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1982		return -EINVAL;
1983	}
1984
1985	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1986#ifdef CONFIG_RFS_ACCEL
1987		struct qede_fastpath *fp = &edev->fp_array[i];
1988
1989		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1990			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1991					      edev->int_info.msix[i].vector);
1992			if (rc) {
1993				DP_ERR(edev, "Failed to add CPU rmap\n");
1994				qede_free_arfs(edev);
1995			}
1996		}
1997#endif
1998		rc = request_irq(edev->int_info.msix[i].vector,
1999				 qede_msix_fp_int, 0, edev->fp_array[i].name,
2000				 &edev->fp_array[i]);
2001		if (rc) {
2002			DP_ERR(edev, "Request fp %d irq failed\n", i);
2003#ifdef CONFIG_RFS_ACCEL
2004			if (edev->ndev->rx_cpu_rmap)
2005				free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2006
2007			edev->ndev->rx_cpu_rmap = NULL;
2008#endif
2009			qede_sync_free_irqs(edev);
2010			return rc;
2011		}
2012		DP_VERBOSE(edev, NETIF_MSG_INTR,
2013			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
2014			   edev->fp_array[i].name, i,
2015			   &edev->fp_array[i]);
2016		edev->int_info.used_cnt++;
2017	}
2018
2019	return 0;
2020}
2021
2022static void qede_simd_fp_handler(void *cookie)
2023{
2024	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
2025
2026	napi_schedule_irqoff(&fp->napi);
2027}
2028
2029static int qede_setup_irqs(struct qede_dev *edev)
2030{
2031	int i, rc = 0;
2032
2033	/* Learn Interrupt configuration */
2034	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
2035	if (rc)
2036		return rc;
2037
2038	if (edev->int_info.msix_cnt) {
2039		rc = qede_req_msix_irqs(edev);
2040		if (rc)
2041			return rc;
2042		edev->ndev->irq = edev->int_info.msix[0].vector;
2043	} else {
2044		const struct qed_common_ops *ops;
2045
2046		/* qed should receive the RSS ids and callbacks */
2047		ops = edev->ops->common;
2048		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
2049			ops->simd_handler_config(edev->cdev,
2050						 &edev->fp_array[i], i,
2051						 qede_simd_fp_handler);
2052		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
2053	}
2054	return 0;
2055}
2056
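/* Poll (roughly 1-2 seconds, in 1ms-2ms steps) for the SW producer and
 * consumer of a Tx queue to converge. If the queue is stuck and
 * 'allow_drain' is set, request a drain from the management FW and then
 * retry once without it.
 */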
2057static int qede_drain_txq(struct qede_dev *edev,
2058			  struct qede_tx_queue *txq, bool allow_drain)
2059{
2060	int rc, cnt = 1000;
2061
2062	while (txq->sw_tx_cons != txq->sw_tx_prod) {
2063		if (!cnt) {
2064			if (allow_drain) {
2065				DP_NOTICE(edev,
2066					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
2067					  txq->index);
2068				rc = edev->ops->common->drain(edev->cdev);
2069				if (rc)
2070					return rc;
2071				return qede_drain_txq(edev, txq, false);
2072			}
2073			DP_NOTICE(edev,
2074				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
2075				  txq->index, txq->sw_tx_prod,
2076				  txq->sw_tx_cons);
2077			return -ENODEV;
2078		}
2079		cnt--;
2080		usleep_range(1000, 2000);
2081		barrier();
2082	}
2083
2084	/* FW finished processing, wait for HW to transmit all tx packets */
2085	usleep_range(1000, 2000);
2086
2087	return 0;
2088}
2089
2090static int qede_stop_txq(struct qede_dev *edev,
2091			 struct qede_tx_queue *txq, int rss_id)
2092{
2093	/* delete doorbell from doorbell recovery mechanism */
2094	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
2095					   &txq->tx_db);
2096
2097	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
2098}
2099
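/* Tear down the datapath in reverse order of bring-up: deactivate the
 * vport, drain all Tx/XDP queues, stop the queues from the last fastpath
 * entry back to the first, and finally stop the vport itself.
 */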
2100static int qede_stop_queues(struct qede_dev *edev)
2101{
2102	struct qed_update_vport_params *vport_update_params;
2103	struct qed_dev *cdev = edev->cdev;
2104	struct qede_fastpath *fp;
2105	int rc, i;
2106
2107	/* Disable the vport */
2108	vport_update_params = vzalloc(sizeof(*vport_update_params));
2109	if (!vport_update_params)
2110		return -ENOMEM;
2111
2112	vport_update_params->vport_id = 0;
2113	vport_update_params->update_vport_active_flg = 1;
2114	vport_update_params->vport_active_flg = 0;
2115	vport_update_params->update_rss_flg = 0;
2116
2117	rc = edev->ops->vport_update(cdev, vport_update_params);
2118	vfree(vport_update_params);
2119
2120	if (rc) {
2121		DP_ERR(edev, "Failed to update vport\n");
2122		return rc;
2123	}
2124
2125	/* Flush Tx queues. If needed, request drain from MCP */
2126	for_each_queue(i) {
2127		fp = &edev->fp_array[i];
2128
2129		if (fp->type & QEDE_FASTPATH_TX) {
2130			int cos;
2131
2132			for_each_cos_in_txq(edev, cos) {
2133				rc = qede_drain_txq(edev, &fp->txq[cos], true);
2134				if (rc)
2135					return rc;
2136			}
2137		}
2138
2139		if (fp->type & QEDE_FASTPATH_XDP) {
2140			rc = qede_drain_txq(edev, fp->xdp_tx, true);
2141			if (rc)
2142				return rc;
2143		}
2144	}
2145
2146	/* Stop all Queues in reverse order */
2147	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
2148		fp = &edev->fp_array[i];
2149
2150		/* Stop the Tx Queue(s) */
2151		if (fp->type & QEDE_FASTPATH_TX) {
2152			int cos;
2153
2154			for_each_cos_in_txq(edev, cos) {
2155				rc = qede_stop_txq(edev, &fp->txq[cos], i);
2156				if (rc)
2157					return rc;
2158			}
2159		}
2160
2161		/* Stop the Rx Queue */
2162		if (fp->type & QEDE_FASTPATH_RX) {
2163			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
2164			if (rc) {
2165				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
2166				return rc;
2167			}
2168		}
2169
2170		/* Stop the XDP forwarding queue */
2171		if (fp->type & QEDE_FASTPATH_XDP) {
2172			rc = qede_stop_txq(edev, fp->xdp_tx, i);
2173			if (rc)
2174				return rc;
2175
2176			bpf_prog_put(fp->rxq->xdp_prog);
2177		}
2178	}
2179
2180	/* Stop the vport */
2181	rc = edev->ops->vport_stop(cdev, 0);
2182	if (rc)
2183		DP_ERR(edev, "Failed to stop VPORT\n");
2184
2185	return rc;
2186}
2187
2188static int qede_start_txq(struct qede_dev *edev,
2189			  struct qede_fastpath *fp,
2190			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
2191{
2192	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
2193	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
2194	struct qed_queue_start_common_params params;
2195	struct qed_txq_start_ret_params ret_params;
2196	int rc;
2197
2198	memset(&params, 0, sizeof(params));
2199	memset(&ret_params, 0, sizeof(ret_params));
2200
2201	/* Let the XDP queue share the queue-zone with one of the regular Tx
2202	 * queues. We don't really care about its coalescing.
2203	 */
2204	if (txq->is_xdp)
2205		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
2206	else
2207		params.queue_id = txq->index;
2208
2209	params.p_sb = fp->sb_info;
2210	params.sb_idx = sb_idx;
2211	params.tc = txq->cos;
2212
2213	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
2214				   page_cnt, &ret_params);
2215	if (rc) {
2216		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
2217		return rc;
2218	}
2219
2220	txq->doorbell_addr = ret_params.p_doorbell;
2221	txq->handle = ret_params.p_handle;
2222
2223	/* Determine the FW consumer address associated with this queue */
2224	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
2225
2226	/* Prepare the doorbell parameters */
2227	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
2228	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
2229	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
2230		  DQ_XCM_ETH_TX_BD_PROD_CMD);
2231	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
2232
2233	/* register doorbell with doorbell recovery mechanism */
2234	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
2235						&txq->tx_db, DB_REC_WIDTH_32B,
2236						DB_REC_KERNEL);
2237
2238	return rc;
2239}
2240
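/* Bring up the datapath: start the vport, start every Rx/Tx/XDP queue with
 * its PBL and status-block parameters, then send a vport-update that
 * activates the vport and programs RSS and Tx switching as needed.
 */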
2241static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
2242{
2243	int vlan_removal_en = 1;
2244	struct qed_dev *cdev = edev->cdev;
2245	struct qed_dev_info *qed_info = &edev->dev_info.common;
2246	struct qed_update_vport_params *vport_update_params;
2247	struct qed_queue_start_common_params q_params;
2248	struct qed_start_vport_params start = {0};
2249	int rc, i;
2250
2251	if (!edev->num_queues) {
2252		DP_ERR(edev,
2253		       "Cannot update V-PORT to active as there are no Rx queues\n");
2254		return -EINVAL;
2255	}
2256
2257	vport_update_params = vzalloc(sizeof(*vport_update_params));
2258	if (!vport_update_params)
2259		return -ENOMEM;
2260
2261	start.handle_ptp_pkts = !!(edev->ptp);
2262	start.gro_enable = !edev->gro_disable;
2263	start.mtu = edev->ndev->mtu;
2264	start.vport_id = 0;
2265	start.drop_ttl0 = true;
2266	start.remove_inner_vlan = vlan_removal_en;
2267	start.clear_stats = clear_stats;
2268
2269	rc = edev->ops->vport_start(cdev, &start);
2270
2271	if (rc) {
2272		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
2273		goto out;
2274	}
2275
2276	DP_VERBOSE(edev, NETIF_MSG_IFUP,
2277		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2278		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
2279
2280	for_each_queue(i) {
2281		struct qede_fastpath *fp = &edev->fp_array[i];
2282		dma_addr_t p_phys_table;
2283		u32 page_cnt;
2284
2285		if (fp->type & QEDE_FASTPATH_RX) {
2286			struct qed_rxq_start_ret_params ret_params;
2287			struct qede_rx_queue *rxq = fp->rxq;
2288			__le16 *val;
2289
2290			memset(&ret_params, 0, sizeof(ret_params));
2291			memset(&q_params, 0, sizeof(q_params));
2292			q_params.queue_id = rxq->rxq_id;
2293			q_params.vport_id = 0;
2294			q_params.p_sb = fp->sb_info;
2295			q_params.sb_idx = RX_PI;
2296
2297			p_phys_table =
2298			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
2299			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
2300
2301			rc = edev->ops->q_rx_start(cdev, i, &q_params,
2302						   rxq->rx_buf_size,
2303						   rxq->rx_bd_ring.p_phys_addr,
2304						   p_phys_table,
2305						   page_cnt, &ret_params);
2306			if (rc) {
2307				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
2308				       rc);
2309				goto out;
2310			}
2311
2312			/* Use the return parameters */
2313			rxq->hw_rxq_prod_addr = ret_params.p_prod;
2314			rxq->handle = ret_params.p_handle;
2315
2316			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
2317			rxq->hw_cons_ptr = val;
2318
2319			qede_update_rx_prod(edev, rxq);
2320		}
2321
2322		if (fp->type & QEDE_FASTPATH_XDP) {
2323			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2324			if (rc)
2325				goto out;
2326
2327			bpf_prog_add(edev->xdp_prog, 1);
2328			fp->rxq->xdp_prog = edev->xdp_prog;
2329		}
2330
2331		if (fp->type & QEDE_FASTPATH_TX) {
2332			int cos;
2333
2334			for_each_cos_in_txq(edev, cos) {
2335				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
2336						    TX_PI(cos));
2337				if (rc)
2338					goto out;
2339			}
2340		}
2341	}
2342
2343	/* Prepare and send the vport enable */
2344	vport_update_params->vport_id = start.vport_id;
2345	vport_update_params->update_vport_active_flg = 1;
2346	vport_update_params->vport_active_flg = 1;
2347
2348	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2349	    qed_info->tx_switching) {
2350		vport_update_params->update_tx_switching_flg = 1;
2351		vport_update_params->tx_switching_flg = 1;
2352	}
2353
2354	qede_fill_rss_params(edev, &vport_update_params->rss_params,
2355			     &vport_update_params->update_rss_flg);
2356
2357	rc = edev->ops->vport_update(cdev, vport_update_params);
2358	if (rc)
2359		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2360
2361out:
2362	vfree(vport_update_params);
2363	return rc;
2364}
2365
2366enum qede_unload_mode {
2367	QEDE_UNLOAD_NORMAL,
2368	QEDE_UNLOAD_RECOVERY,
2369};
2370
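/* Unload flow. In QEDE_UNLOAD_NORMAL the link is reset and the queues are
 * stopped gracefully through the FW; in QEDE_UNLOAD_RECOVERY the FW is
 * assumed unusable, so the driver empties the Tx queues itself instead.
 */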
2371static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
2372			bool is_locked)
2373{
2374	struct qed_link_params link_params;
2375	int rc;
2376
2377	DP_INFO(edev, "Starting qede unload\n");
2378
2379	if (!is_locked)
2380		__qede_lock(edev);
2381
2382	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2383
2384	if (mode != QEDE_UNLOAD_RECOVERY)
2385		edev->state = QEDE_STATE_CLOSED;
2386
2387	qede_rdma_dev_event_close(edev);
2388
2389	/* Close OS Tx */
2390	netif_tx_disable(edev->ndev);
2391	netif_carrier_off(edev->ndev);
2392
2393	if (mode != QEDE_UNLOAD_RECOVERY) {
2394		/* Reset the link */
2395		memset(&link_params, 0, sizeof(link_params));
2396		link_params.link_up = false;
2397		edev->ops->common->set_link(edev->cdev, &link_params);
2398
2399		rc = qede_stop_queues(edev);
2400		if (rc) {
2401#ifdef CONFIG_RFS_ACCEL
2402			if (edev->dev_info.common.b_arfs_capable) {
2403				qede_poll_for_freeing_arfs_filters(edev);
2404				if (edev->ndev->rx_cpu_rmap)
2405					free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2406
2407				edev->ndev->rx_cpu_rmap = NULL;
2408			}
2409#endif
2410			qede_sync_free_irqs(edev);
2411			goto out;
2412		}
2413
2414		DP_INFO(edev, "Stopped Queues\n");
2415	}
2416
2417	qede_vlan_mark_nonconfigured(edev);
2418	edev->ops->fastpath_stop(edev->cdev);
2419
2420	if (edev->dev_info.common.b_arfs_capable) {
2421		qede_poll_for_freeing_arfs_filters(edev);
2422		qede_free_arfs(edev);
2423	}
2424
2425	/* Release the interrupts */
2426	qede_sync_free_irqs(edev);
2427	edev->ops->common->set_fp_int(edev->cdev, 0);
2428
2429	qede_napi_disable_remove(edev);
2430
2431	if (mode == QEDE_UNLOAD_RECOVERY)
2432		qede_empty_tx_queues(edev);
2433
2434	qede_free_mem_load(edev);
2435	qede_free_fp_array(edev);
2436
2437out:
2438	if (!is_locked)
2439		__qede_unlock(edev);
2440
2441	if (mode != QEDE_UNLOAD_RECOVERY)
2442		DP_NOTICE(edev, "Link is down\n");
2443
2444	edev->ptp_skip_txts = 0;
2445
2446	DP_INFO(edev, "Ending qede unload\n");
2447}
2448
2449enum qede_load_mode {
2450	QEDE_LOAD_NORMAL,
2451	QEDE_LOAD_RELOAD,
2452	QEDE_LOAD_RECOVERY,
2453};
2454
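/* Load flow: allocate the fastpath array and all queue memory, register
 * NAPI contexts and IRQs, start the vport and its queues, then request
 * link-up. Errors unwind through the err* labels in reverse order.
 */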
2455static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2456		     bool is_locked)
2457{
2458	struct qed_link_params link_params;
2459	struct ethtool_coalesce coal = {};
2460	u8 num_tc;
2461	int rc, i;
2462
2463	DP_INFO(edev, "Starting qede load\n");
2464
2465	if (!is_locked)
2466		__qede_lock(edev);
2467
2468	rc = qede_set_num_queues(edev);
2469	if (rc)
2470		goto out;
2471
2472	rc = qede_alloc_fp_array(edev);
2473	if (rc)
2474		goto out;
2475
2476	qede_init_fp(edev);
2477
2478	rc = qede_alloc_mem_load(edev);
2479	if (rc)
2480		goto err1;
2481	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2482		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2483
2484	rc = qede_set_real_num_queues(edev);
2485	if (rc)
2486		goto err2;
2487
2488	if (qede_alloc_arfs(edev)) {
2489		edev->ndev->features &= ~NETIF_F_NTUPLE;
2490		edev->dev_info.common.b_arfs_capable = false;
2491	}
2492
2493	qede_napi_add_enable(edev);
2494	DP_INFO(edev, "Napi added and enabled\n");
2495
2496	rc = qede_setup_irqs(edev);
2497	if (rc)
2498		goto err3;
2499	DP_INFO(edev, "Setup IRQs succeeded\n");
2500
2501	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2502	if (rc)
2503		goto err4;
2504	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2505
2506	num_tc = netdev_get_num_tc(edev->ndev);
2507	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
2508	qede_setup_tc(edev->ndev, num_tc);
2509
2510	/* Program un-configured VLANs */
2511	qede_configure_vlan_filters(edev);
2512
2513	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2514
2515	/* Ask for link-up using current configuration */
2516	memset(&link_params, 0, sizeof(link_params));
2517	link_params.link_up = true;
2518	edev->ops->common->set_link(edev->cdev, &link_params);
2519
2520	edev->state = QEDE_STATE_OPEN;
2521
2522	coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
2523	coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
2524
2525	for_each_queue(i) {
2526		if (edev->coal_entry[i].isvalid) {
2527			coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
2528			coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
2529		}
2530		__qede_unlock(edev);
2531		qede_set_per_coalesce(edev->ndev, i, &coal);
2532		__qede_lock(edev);
2533	}
2534	DP_INFO(edev, "Ending qede load successfully\n");
2535
2536	goto out;
2537err4:
2538	qede_sync_free_irqs(edev);
2539err3:
2540	qede_napi_disable_remove(edev);
2541err2:
2542	qede_free_mem_load(edev);
2543err1:
2544	edev->ops->common->set_fp_int(edev->cdev, 0);
2545	qede_free_fp_array(edev);
2546	edev->num_queues = 0;
2547	edev->fp_num_tx = 0;
2548	edev->fp_num_rx = 0;
2549out:
2550	if (!is_locked)
2551		__qede_unlock(edev);
2552
2553	return rc;
2554}
2555
2556/* 'func' should be able to run between unload and reload assuming interface
2557 * is actually running, or afterwards in case it's currently DOWN.
2558 */
2559void qede_reload(struct qede_dev *edev,
2560		 struct qede_reload_args *args, bool is_locked)
2561{
2562	if (!is_locked)
2563		__qede_lock(edev);
2564
2565	/* Since qede_lock is held, the internal state can't change even if the
2566	 * netdev state starts transitioning. If the current internal
2567	 * configuration indicates the device is up, reload it.
2568	 */
2569	if (edev->state == QEDE_STATE_OPEN) {
2570		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2571		if (args)
2572			args->func(edev, args);
2573		qede_load(edev, QEDE_LOAD_RELOAD, true);
2574
2575		/* Since no one is going to do it for us, re-configure */
2576		qede_config_rx_mode(edev->ndev);
2577	} else if (args) {
2578		args->func(edev, args);
2579	}
2580
2581	if (!is_locked)
2582		__qede_unlock(edev);
2583}
2584
2585/* called with rtnl_lock */
2586static int qede_open(struct net_device *ndev)
2587{
2588	struct qede_dev *edev = netdev_priv(ndev);
2589	int rc;
2590
2591	netif_carrier_off(ndev);
2592
2593	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2594
2595	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2596	if (rc)
2597		return rc;
2598
2599	udp_tunnel_nic_reset_ntf(ndev);
2600
2601	edev->ops->common->update_drv_state(edev->cdev, true);
2602
2603	return 0;
2604}
2605
2606static int qede_close(struct net_device *ndev)
2607{
2608	struct qede_dev *edev = netdev_priv(ndev);
2609
2610	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2611
2612	if (edev->cdev)
2613		edev->ops->common->update_drv_state(edev->cdev, false);
2614
2615	return 0;
2616}
2617
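/* Link-change callback invoked by qed. Toggles the carrier state and the
 * Tx queues and notifies the RDMA driver, but only once the interface has
 * actually requested a link (QEDE_FLAGS_LINK_REQUESTED).
 */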
2618static void qede_link_update(void *dev, struct qed_link_output *link)
2619{
2620	struct qede_dev *edev = dev;
2621
2622	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
2623		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
2624		return;
2625	}
2626
2627	if (link->link_up) {
2628		if (!netif_carrier_ok(edev->ndev)) {
2629			DP_NOTICE(edev, "Link is up\n");
2630			netif_tx_start_all_queues(edev->ndev);
2631			netif_carrier_on(edev->ndev);
2632			qede_rdma_dev_event_open(edev);
2633		}
2634	} else {
2635		if (netif_carrier_ok(edev->ndev)) {
2636			DP_NOTICE(edev, "Link is down\n");
2637			netif_tx_disable(edev->ndev);
2638			netif_carrier_off(edev->ndev);
2639			qede_rdma_dev_event_close(edev);
2640		}
2641	}
2642}
2643
2644static void qede_schedule_recovery_handler(void *dev)
2645{
2646	struct qede_dev *edev = dev;
2647
2648	if (edev->state == QEDE_STATE_RECOVERY) {
2649		DP_NOTICE(edev,
2650			  "Avoid scheduling recovery handling since already in the recovery state\n");
2651		return;
2652	}
2653
2654	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
2655	schedule_delayed_work(&edev->sp_task, 0);
2656
2657	DP_INFO(edev, "Scheduled a recovery handler\n");
2658}
2659
2660static void qede_recovery_failed(struct qede_dev *edev)
2661{
2662	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
2663
2664	netif_device_detach(edev->ndev);
2665
2666	if (edev->cdev)
2667		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
2668}
2669
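/* Process-context recovery: remove and re-probe the device underneath the
 * netdev. If the interface was open, it is unloaded in recovery mode first
 * and loaded again once the new cdev is ready.
 */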
2670static void qede_recovery_handler(struct qede_dev *edev)
2671{
2672	u32 curr_state = edev->state;
2673	int rc;
2674
2675	DP_NOTICE(edev, "Starting a recovery process\n");
2676
2677	/* No need to acquire the qede_lock first since it is taken by
2678	 * qede_sp_task before calling this function.
2679	 */
2680	edev->state = QEDE_STATE_RECOVERY;
2681
2682	edev->ops->common->recovery_prolog(edev->cdev);
2683
2684	if (curr_state == QEDE_STATE_OPEN)
2685		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
2686
2687	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
2688
2689	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
2690			  IS_VF(edev), QEDE_PROBE_RECOVERY);
2691	if (rc) {
2692		edev->cdev = NULL;
2693		goto err;
2694	}
2695
2696	if (curr_state == QEDE_STATE_OPEN) {
2697		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
2698		if (rc)
2699			goto err;
2700
2701		qede_config_rx_mode(edev->ndev);
2702		udp_tunnel_nic_reset_ntf(edev->ndev);
2703	}
2704
2705	edev->state = curr_state;
2706
2707	DP_NOTICE(edev, "Recovery handling is done\n");
2708
2709	return;
2710
2711err:
2712	qede_recovery_failed(edev);
2713}
2714
2715static void qede_atomic_hw_err_handler(struct qede_dev *edev)
2716{
2717	struct qed_dev *cdev = edev->cdev;
2718
2719	DP_NOTICE(edev,
2720		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
2721		  edev->err_flags);
2722
2723	/* Get a call trace of the flow that led to the error */
2724	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
2725
2726	/* Prevent HW attentions from being reasserted */
2727	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
2728		edev->ops->common->attn_clr_enable(cdev, true);
2729
2730	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
2731}
2732
2733static void qede_generic_hw_err_handler(struct qede_dev *edev)
2734{
2735	DP_NOTICE(edev,
2736		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
2737		  edev->err_flags);
2738
2739	if (edev->devlink) {
2740		DP_NOTICE(edev, "Reporting fatal error to devlink\n");
2741		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
2742	}
2743
2744	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2745
2746	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
2747}
2748
2749static void qede_set_hw_err_flags(struct qede_dev *edev,
2750				  enum qed_hw_err_type err_type)
2751{
2752	unsigned long err_flags = 0;
2753
2754	switch (err_type) {
2755	case QED_HW_ERR_DMAE_FAIL:
2756		set_bit(QEDE_ERR_WARN, &err_flags);
2757		fallthrough;
2758	case QED_HW_ERR_MFW_RESP_FAIL:
2759	case QED_HW_ERR_HW_ATTN:
2760	case QED_HW_ERR_RAMROD_FAIL:
2761	case QED_HW_ERR_FW_ASSERT:
2762		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
2763		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
2764		/* Mark this error as recoverable and start recovery */
2765		set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
2766		break;
2767
2768	default:
2769		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
2770		break;
2771	}
2772
2773	edev->err_flags |= err_flags;
2774}
2775
2776static void qede_schedule_hw_err_handler(void *dev,
2777					 enum qed_hw_err_type err_type)
2778{
2779	struct qede_dev *edev = dev;
2780
2781	/* Fan failure cannot be masked by handling of another HW error or by a
2782	 * concurrent recovery process.
2783	 */
2784	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
2785	     edev->state == QEDE_STATE_RECOVERY) &&
2786	     err_type != QED_HW_ERR_FAN_FAIL) {
2787		DP_INFO(edev,
2788			"Avoid scheduling an error handling while another HW error is being handled\n");
2789		return;
2790	}
2791
2792	if (err_type >= QED_HW_ERR_LAST) {
2793		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
2794		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2795		return;
2796	}
2797
2798	edev->last_err_type = err_type;
2799	qede_set_hw_err_flags(edev, err_type);
2800	qede_atomic_hw_err_handler(edev);
2801	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
2802	schedule_delayed_work(&edev->sp_task, 0);
2803
2804	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
2805}
2806
2807static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
2808{
2809	struct netdev_queue *netdev_txq;
2810
2811	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
2812	if (netif_xmit_stopped(netdev_txq))
2813		return true;
2814
2815	return false;
2816}
2817
2818static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
2819{
2820	struct qede_dev *edev = dev;
2821	struct netdev_hw_addr *ha;
2822	int i;
2823
2824	if (edev->ndev->features & NETIF_F_IP_CSUM)
2825		data->feat_flags |= QED_TLV_IP_CSUM;
2826	if (edev->ndev->features & NETIF_F_TSO)
2827		data->feat_flags |= QED_TLV_LSO;
2828
2829	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
2830	eth_zero_addr(data->mac[1]);
2831	eth_zero_addr(data->mac[2]);
2832	/* Copy the first two UC macs */
2833	netif_addr_lock_bh(edev->ndev);
2834	i = 1;
2835	netdev_for_each_uc_addr(ha, edev->ndev) {
2836		ether_addr_copy(data->mac[i++], ha->addr);
2837		if (i == QED_TLV_MAC_COUNT)
2838			break;
2839	}
2840
2841	netif_addr_unlock_bh(edev->ndev);
2842}
2843
2844static void qede_get_eth_tlv_data(void *dev, void *data)
2845{
2846	struct qed_mfw_tlv_eth *etlv = data;
2847	struct qede_dev *edev = dev;
2848	struct qede_fastpath *fp;
2849	int i;
2850
2851	etlv->lso_maxoff_size = 0xFFFF;
2852	etlv->lso_maxoff_size_set = true;
2853	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
2854	etlv->lso_minseg_size_set = true;
2855	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
2856	etlv->prom_mode_set = true;
2857	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
2858	etlv->tx_descr_size_set = true;
2859	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
2860	etlv->rx_descr_size_set = true;
2861	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
2862	etlv->iov_offload_set = true;
2863
2864	/* Fill information regarding queues; this should be done under the qede
2865	 * lock to guarantee they don't change beneath our feet.
2866	 */
2867	etlv->txqs_empty = true;
2868	etlv->rxqs_empty = true;
2869	etlv->num_txqs_full = 0;
2870	etlv->num_rxqs_full = 0;
2871
2872	__qede_lock(edev);
2873	for_each_queue(i) {
2874		fp = &edev->fp_array[i];
2875		if (fp->type & QEDE_FASTPATH_TX) {
2876			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
2877
2878			if (txq->sw_tx_cons != txq->sw_tx_prod)
2879				etlv->txqs_empty = false;
2880			if (qede_is_txq_full(edev, txq))
2881				etlv->num_txqs_full++;
2882		}
2883		if (fp->type & QEDE_FASTPATH_RX) {
2884			if (qede_has_rx_work(fp->rxq))
2885				etlv->rxqs_empty = false;
2886
2887			/* This one is a bit tricky; Firmware might stop
2888			 * placing packets if ring is not yet full.
2889			 * Give an approximation.
2890			 */
2891			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
2892			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
2893			    RX_RING_SIZE - 100)
2894				etlv->num_rxqs_full++;
2895		}
2896	}
2897	__qede_unlock(edev);
2898
2899	etlv->txqs_empty_set = true;
2900	etlv->rxqs_empty_set = true;
2901	etlv->num_txqs_full_set = true;
2902	etlv->num_rxqs_full_set = true;
2903}
2904
2905/**
2906 * qede_io_error_detected(): Called when PCI error is detected
2907 *
2908 * @pdev: Pointer to PCI device
2909 * @state: The current pci connection state
2910 *
2911 * Return: pci_ers_result_t.
2912 *
2913 * This function is called after a PCI bus error affecting
2914 * this device has been detected.
2915 */
2916static pci_ers_result_t
2917qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2918{
2919	struct net_device *dev = pci_get_drvdata(pdev);
2920	struct qede_dev *edev = netdev_priv(dev);
2921
2922	if (!edev)
2923		return PCI_ERS_RESULT_NONE;
2924
2925	DP_NOTICE(edev, "IO error detected [%d]\n", state);
2926
2927	__qede_lock(edev);
2928	if (edev->state == QEDE_STATE_RECOVERY) {
2929		DP_NOTICE(edev, "Device already in the recovery state\n");
2930		__qede_unlock(edev);
2931		return PCI_ERS_RESULT_NONE;
2932	}
2933
2934	/* PF handles the recovery of its VFs */
2935	if (IS_VF(edev)) {
2936		DP_VERBOSE(edev, QED_MSG_IOV,
2937			   "VF recovery is handled by its PF\n");
2938		__qede_unlock(edev);
2939		return PCI_ERS_RESULT_RECOVERED;
2940	}
2941
2942	/* Close OS Tx */
2943	netif_tx_disable(edev->ndev);
2944	netif_carrier_off(edev->ndev);
2945
2946	set_bit(QEDE_SP_AER, &edev->sp_flags);
2947	schedule_delayed_work(&edev->sp_task, 0);
2948
2949	__qede_unlock(edev);
2950
2951	return PCI_ERS_RESULT_CAN_RECOVER;
2952}
v5.14.15
 107
 108/* Utilize last protocol index for XDP */
 109#define XDP_PI	11
 110
 111static void qede_remove(struct pci_dev *pdev);
 112static void qede_shutdown(struct pci_dev *pdev);
 113static void qede_link_update(void *dev, struct qed_link_output *link);
 114static void qede_schedule_recovery_handler(void *dev);
 115static void qede_recovery_handler(struct qede_dev *edev);
 116static void qede_schedule_hw_err_handler(void *dev,
 117					 enum qed_hw_err_type err_type);
 118static void qede_get_eth_tlv_data(void *edev, void *data);
 119static void qede_get_generic_tlv_data(void *edev,
 120				      struct qed_generic_tlvs *data);
 121static void qede_generic_hw_err_handler(struct qede_dev *edev);
 122#ifdef CONFIG_QED_SRIOV
 123static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
 124			    __be16 vlan_proto)
 125{
 126	struct qede_dev *edev = netdev_priv(ndev);
 127
 128	if (vlan > 4095) {
 129		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
 130		return -EINVAL;
 131	}
 132
 133	if (vlan_proto != htons(ETH_P_8021Q))
 134		return -EPROTONOSUPPORT;
 135
 136	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
 137		   vlan, vf);
 138
 139	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
 140}
 141
 142static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
 143{
 144	struct qede_dev *edev = netdev_priv(ndev);
 145
 146	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
 147
 148	if (!is_valid_ether_addr(mac)) {
 149		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
 150		return -EINVAL;
 151	}
 152
 153	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
 154}
 155
 156static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 157{
 158	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
 159	struct qed_dev_info *qed_info = &edev->dev_info.common;
 160	struct qed_update_vport_params *vport_params;
 161	int rc;
 162
 163	vport_params = vzalloc(sizeof(*vport_params));
 164	if (!vport_params)
 165		return -ENOMEM;
 166	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
 167
 168	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
 169
 170	/* Enable/Disable Tx switching for PF */
 171	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
 172	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
 173		vport_params->vport_id = 0;
 174		vport_params->update_tx_switching_flg = 1;
 175		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
 176		edev->ops->vport_update(edev->cdev, vport_params);
 177	}
 178
 179	vfree(vport_params);
 180	return rc;
 181}
 182#endif
  183
 184static const struct pci_error_handlers qede_err_handler = {
 185	.error_detected = qede_io_error_detected,
 186};
 187
 188static struct pci_driver qede_pci_driver = {
 189	.name = "qede",
 190	.id_table = qede_pci_tbl,
 191	.probe = qede_probe,
 192	.remove = qede_remove,
 193	.shutdown = qede_shutdown,
 194#ifdef CONFIG_QED_SRIOV
 195	.sriov_configure = qede_sriov_configure,
 196#endif
  197	.err_handler = &qede_err_handler,
  198};
 199
 200static struct qed_eth_cb_ops qede_ll_ops = {
 201	{
 202#ifdef CONFIG_RFS_ACCEL
 203		.arfs_filter_op = qede_arfs_filter_op,
 204#endif
 205		.link_update = qede_link_update,
 206		.schedule_recovery_handler = qede_schedule_recovery_handler,
 207		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
 208		.get_generic_tlv_data = qede_get_generic_tlv_data,
 209		.get_protocol_tlv_data = qede_get_eth_tlv_data,
 210	},
 211	.force_mac = qede_force_mac,
 212	.ports_update = qede_udp_ports_update,
 213};
 214
 215static int qede_netdev_event(struct notifier_block *this, unsigned long event,
 216			     void *ptr)
 217{
 218	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
 219	struct ethtool_drvinfo drvinfo;
 220	struct qede_dev *edev;
 221
 222	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
 223		goto done;
 224
 225	/* Check whether this is a qede device */
 226	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
 227		goto done;
 228
 229	memset(&drvinfo, 0, sizeof(drvinfo));
 230	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
 231	if (strcmp(drvinfo.driver, "qede"))
 232		goto done;
 233	edev = netdev_priv(ndev);
 234
 235	switch (event) {
 236	case NETDEV_CHANGENAME:
 237		/* Notify qed of the name change */
 238		if (!edev->ops || !edev->ops->common)
 239			goto done;
 240		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
 241		break;
 242	case NETDEV_CHANGEADDR:
 243		edev = netdev_priv(ndev);
 244		qede_rdma_event_changeaddr(edev);
 245		break;
 246	}
 247
 248done:
 249	return NOTIFY_DONE;
 250}
 251
 252static struct notifier_block qede_netdev_notifier = {
 253	.notifier_call = qede_netdev_event,
 254};
 255
 256static
 257int __init qede_init(void)
 258{
 259	int ret;
 260
 261	pr_info("qede_init: %s\n", version);
 262
 263	qede_forced_speed_maps_init();
 264
 265	qed_ops = qed_get_eth_ops();
 266	if (!qed_ops) {
 267		pr_notice("Failed to get qed ethtool operations\n");
 268		return -EINVAL;
 269	}
 270
 271	/* Must register notifier before pci ops, since we might miss
 272	 * interface rename after pci probe and netdev registration.
 273	 */
 274	ret = register_netdevice_notifier(&qede_netdev_notifier);
 275	if (ret) {
 276		pr_notice("Failed to register netdevice_notifier\n");
 277		qed_put_eth_ops();
 278		return -EINVAL;
 279	}
 280
 281	ret = pci_register_driver(&qede_pci_driver);
 282	if (ret) {
 283		pr_notice("Failed to register driver\n");
 284		unregister_netdevice_notifier(&qede_netdev_notifier);
 285		qed_put_eth_ops();
 286		return -EINVAL;
 287	}
 288
 289	return 0;
 290}
 291
 292static void __exit qede_cleanup(void)
 293{
 294	if (debug & QED_LOG_INFO_MASK)
 295		pr_info("qede_cleanup called\n");
 296
 297	unregister_netdevice_notifier(&qede_netdev_notifier);
 298	pci_unregister_driver(&qede_pci_driver);
 299	qed_put_eth_ops();
 300}
 301
 302module_init(qede_init);
 303module_exit(qede_cleanup);
 304
 305static int qede_open(struct net_device *ndev);
 306static int qede_close(struct net_device *ndev);
 307
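/* Snapshot the vport statistics from qed into the driver's cached copy.
 * Called on demand (e.g. from .ndo_get_stats64) rather than periodically.
 */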
 308void qede_fill_by_demand_stats(struct qede_dev *edev)
 309{
 310	struct qede_stats_common *p_common = &edev->stats.common;
 311	struct qed_eth_stats stats;
 312
 313	edev->ops->get_vport_stats(edev->cdev, &stats);
  314
 315	p_common->no_buff_discards = stats.common.no_buff_discards;
 316	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
 317	p_common->ttl0_discard = stats.common.ttl0_discard;
 318	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
 319	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
 320	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
 321	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
 322	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
 323	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
 324	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
 325	p_common->mac_filter_discards = stats.common.mac_filter_discards;
 326	p_common->gft_filter_drop = stats.common.gft_filter_drop;
 327
 328	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
 329	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
 330	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
 331	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
 332	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
 333	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
 334	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
 335	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
 336	p_common->coalesced_events = stats.common.tpa_coalesced_events;
 337	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
 338	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
 339	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
 340
 341	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
 342	p_common->rx_65_to_127_byte_packets =
 343	    stats.common.rx_65_to_127_byte_packets;
 344	p_common->rx_128_to_255_byte_packets =
 345	    stats.common.rx_128_to_255_byte_packets;
 346	p_common->rx_256_to_511_byte_packets =
 347	    stats.common.rx_256_to_511_byte_packets;
 348	p_common->rx_512_to_1023_byte_packets =
 349	    stats.common.rx_512_to_1023_byte_packets;
 350	p_common->rx_1024_to_1518_byte_packets =
 351	    stats.common.rx_1024_to_1518_byte_packets;
 352	p_common->rx_crc_errors = stats.common.rx_crc_errors;
 353	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
 354	p_common->rx_pause_frames = stats.common.rx_pause_frames;
 355	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
 356	p_common->rx_align_errors = stats.common.rx_align_errors;
 357	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
 358	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
 359	p_common->rx_jabbers = stats.common.rx_jabbers;
 360	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
 361	p_common->rx_fragments = stats.common.rx_fragments;
 362	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
 363	p_common->tx_65_to_127_byte_packets =
 364	    stats.common.tx_65_to_127_byte_packets;
 365	p_common->tx_128_to_255_byte_packets =
 366	    stats.common.tx_128_to_255_byte_packets;
 367	p_common->tx_256_to_511_byte_packets =
 368	    stats.common.tx_256_to_511_byte_packets;
 369	p_common->tx_512_to_1023_byte_packets =
 370	    stats.common.tx_512_to_1023_byte_packets;
 371	p_common->tx_1024_to_1518_byte_packets =
 372	    stats.common.tx_1024_to_1518_byte_packets;
 373	p_common->tx_pause_frames = stats.common.tx_pause_frames;
 374	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
 375	p_common->brb_truncates = stats.common.brb_truncates;
 376	p_common->brb_discards = stats.common.brb_discards;
 377	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
 378	p_common->link_change_count = stats.common.link_change_count;
 379	p_common->ptp_skip_txts = edev->ptp_skip_txts;
 380
 381	if (QEDE_IS_BB(edev)) {
 382		struct qede_stats_bb *p_bb = &edev->stats.bb;
 383
 384		p_bb->rx_1519_to_1522_byte_packets =
 385		    stats.bb.rx_1519_to_1522_byte_packets;
 386		p_bb->rx_1519_to_2047_byte_packets =
 387		    stats.bb.rx_1519_to_2047_byte_packets;
 388		p_bb->rx_2048_to_4095_byte_packets =
 389		    stats.bb.rx_2048_to_4095_byte_packets;
 390		p_bb->rx_4096_to_9216_byte_packets =
 391		    stats.bb.rx_4096_to_9216_byte_packets;
 392		p_bb->rx_9217_to_16383_byte_packets =
 393		    stats.bb.rx_9217_to_16383_byte_packets;
 394		p_bb->tx_1519_to_2047_byte_packets =
 395		    stats.bb.tx_1519_to_2047_byte_packets;
 396		p_bb->tx_2048_to_4095_byte_packets =
 397		    stats.bb.tx_2048_to_4095_byte_packets;
 398		p_bb->tx_4096_to_9216_byte_packets =
 399		    stats.bb.tx_4096_to_9216_byte_packets;
 400		p_bb->tx_9217_to_16383_byte_packets =
 401		    stats.bb.tx_9217_to_16383_byte_packets;
 402		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
 403		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
 404	} else {
 405		struct qede_stats_ah *p_ah = &edev->stats.ah;
 406
 407		p_ah->rx_1519_to_max_byte_packets =
 408		    stats.ah.rx_1519_to_max_byte_packets;
 409		p_ah->tx_1519_to_max_byte_packets =
 410		    stats.ah.tx_1519_to_max_byte_packets;
  411	}
 412}
 413
 414static void qede_get_stats64(struct net_device *dev,
 415			     struct rtnl_link_stats64 *stats)
 416{
 417	struct qede_dev *edev = netdev_priv(dev);
 418	struct qede_stats_common *p_common;
 419
 420	qede_fill_by_demand_stats(edev);
 421	p_common = &edev->stats.common;
  422
 423	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
 424			    p_common->rx_bcast_pkts;
 425	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
 426			    p_common->tx_bcast_pkts;
 427
 428	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
 429			  p_common->rx_bcast_bytes;
 430	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
 431			  p_common->tx_bcast_bytes;
 432
 433	stats->tx_errors = p_common->tx_err_drop_pkts;
 434	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
 435
 436	stats->rx_fifo_errors = p_common->no_buff_discards;
 437
 438	if (QEDE_IS_BB(edev))
 439		stats->collisions = edev->stats.bb.tx_total_collisions;
 440	stats->rx_crc_errors = p_common->rx_crc_errors;
  441	stats->rx_frame_errors = p_common->rx_align_errors;
 442}
 443
 444#ifdef CONFIG_QED_SRIOV
 445static int qede_get_vf_config(struct net_device *dev, int vfidx,
 446			      struct ifla_vf_info *ivi)
 447{
 448	struct qede_dev *edev = netdev_priv(dev);
 449
 450	if (!edev->ops)
 451		return -EINVAL;
 452
 453	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
 454}
 455
 456static int qede_set_vf_rate(struct net_device *dev, int vfidx,
 457			    int min_tx_rate, int max_tx_rate)
 458{
 459	struct qede_dev *edev = netdev_priv(dev);
 460
 461	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
 462					max_tx_rate);
 463}
 464
 465static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
 466{
 467	struct qede_dev *edev = netdev_priv(dev);
 468
 469	if (!edev->ops)
 470		return -EINVAL;
 471
 472	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
 473}
 474
 475static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
 476				  int link_state)
 477{
 478	struct qede_dev *edev = netdev_priv(dev);
 479
 480	if (!edev->ops)
 481		return -EINVAL;
 482
 483	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
 484}
 485
 486static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
 487{
 488	struct qede_dev *edev = netdev_priv(dev);
 489
 490	if (!edev->ops)
 491		return -EINVAL;
 492
 493	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
 494}
 495#endif
 496
 497static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 498{
 499	struct qede_dev *edev = netdev_priv(dev);
 500
 501	if (!netif_running(dev))
 502		return -EAGAIN;
 503
 504	switch (cmd) {
 505	case SIOCSHWTSTAMP:
 506		return qede_ptp_hw_ts(edev, ifr);
 507	default:
 508		DP_VERBOSE(edev, QED_MSG_DEBUG,
 509			   "default IOCTL cmd 0x%x\n", cmd);
 510		return -EOPNOTSUPP;
 511	}
 512
 513	return 0;
 514}
 515
  516static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
  517{
 518	DP_NOTICE(edev,
 519		  "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
 520		  txq->index, le16_to_cpu(*txq->hw_cons_ptr),
 521		  qed_chain_get_cons_idx(&txq->tx_pbl),
 522		  qed_chain_get_prod_idx(&txq->tx_pbl),
  523		  jiffies);
 524}
 525
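/* .ndo_tx_timeout handler: dump the state of every per-TC Tx queue on the
 * timed-out fastpath, then - on a PF - escalate to the sp_task for debug
 * data collection, unless another HW error is already being handled.
 */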
 526static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
 527{
 528	struct qede_dev *edev = netdev_priv(dev);
 529	struct qede_tx_queue *txq;
 530	int cos;
 531
 532	netif_carrier_off(dev);
 533	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
 534
 535	if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
  536		return;
 537
 538	for_each_cos_in_txq(edev, cos) {
 539		txq = &edev->fp_array[txqueue].txq[cos];
 540
 541		if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
 542		    qed_chain_get_prod_idx(&txq->tx_pbl))
  543			qede_tx_log_print(edev, txq);
 544	}
 545
 546	if (IS_VF(edev))
 547		return;
 548
 549	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
 550	    edev->state == QEDE_STATE_RECOVERY) {
 551		DP_INFO(edev,
 552			"Avoid handling a Tx timeout while another HW error is being handled\n");
 553		return;
 554	}
 555
 556	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
 557	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
 558	schedule_delayed_work(&edev->sp_task, 0);
 559}
 560
 561static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
 562{
 563	struct qede_dev *edev = netdev_priv(ndev);
 564	int cos, count, offset;
 565
 566	if (num_tc > edev->dev_info.num_tc)
 567		return -EINVAL;
 568
 569	netdev_reset_tc(ndev);
 570	netdev_set_num_tc(ndev, num_tc);
 571
 572	for_each_cos_in_txq(edev, cos) {
 573		count = QEDE_TSS_COUNT(edev);
 574		offset = cos * QEDE_TSS_COUNT(edev);
 575		netdev_set_tc_queue(ndev, cos, count, offset);
 576	}
 577
 578	return 0;
 579}
 580
 581static int
 582qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
 583		__be16 proto)
 584{
 585	switch (f->command) {
 586	case FLOW_CLS_REPLACE:
 587		return qede_add_tc_flower_fltr(edev, proto, f);
 588	case FLOW_CLS_DESTROY:
 589		return qede_delete_flow_filter(edev, f->cookie);
 590	default:
 591		return -EOPNOTSUPP;
 592	}
 593}
 594
 595static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 596				  void *cb_priv)
 597{
 598	struct flow_cls_offload *f;
 599	struct qede_dev *edev = cb_priv;
 600
 601	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
 602		return -EOPNOTSUPP;
 603
 604	switch (type) {
 605	case TC_SETUP_CLSFLOWER:
 606		f = type_data;
 607		return qede_set_flower(edev, f, f->common.protocol);
 608	default:
 609		return -EOPNOTSUPP;
 610	}
 611}
 612
 613static LIST_HEAD(qede_block_cb_list);
 614
 615static int
 616qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
 617		      void *type_data)
 618{
 619	struct qede_dev *edev = netdev_priv(dev);
 620	struct tc_mqprio_qopt *mqprio;
 621
 622	switch (type) {
 623	case TC_SETUP_BLOCK:
 624		return flow_block_cb_setup_simple(type_data,
 625						  &qede_block_cb_list,
 626						  qede_setup_tc_block_cb,
 627						  edev, edev, true);
 628	case TC_SETUP_QDISC_MQPRIO:
 629		mqprio = type_data;
 630
 631		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
 632		return qede_setup_tc(dev, mqprio->num_tc);
 633	default:
 634		return -EOPNOTSUPP;
 635	}
 636}
 637
 638static const struct net_device_ops qede_netdev_ops = {
 639	.ndo_open		= qede_open,
 640	.ndo_stop		= qede_close,
 641	.ndo_start_xmit		= qede_start_xmit,
 642	.ndo_select_queue	= qede_select_queue,
 643	.ndo_set_rx_mode	= qede_set_rx_mode,
 644	.ndo_set_mac_address	= qede_set_mac_addr,
 645	.ndo_validate_addr	= eth_validate_addr,
 646	.ndo_change_mtu		= qede_change_mtu,
 647	.ndo_do_ioctl		= qede_ioctl,
 648	.ndo_tx_timeout		= qede_tx_timeout,
 649#ifdef CONFIG_QED_SRIOV
 650	.ndo_set_vf_mac		= qede_set_vf_mac,
 651	.ndo_set_vf_vlan	= qede_set_vf_vlan,
 652	.ndo_set_vf_trust	= qede_set_vf_trust,
 653#endif
 654	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
 655	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
 656	.ndo_fix_features	= qede_fix_features,
 657	.ndo_set_features	= qede_set_features,
 658	.ndo_get_stats64	= qede_get_stats64,
 659#ifdef CONFIG_QED_SRIOV
 660	.ndo_set_vf_link_state	= qede_set_vf_link_state,
 661	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
 662	.ndo_get_vf_config	= qede_get_vf_config,
 663	.ndo_set_vf_rate	= qede_set_vf_rate,
 664#endif
 665	.ndo_features_check	= qede_features_check,
 666	.ndo_bpf		= qede_xdp,
 667#ifdef CONFIG_RFS_ACCEL
 668	.ndo_rx_flow_steer	= qede_rx_flow_steer,
 669#endif
 670	.ndo_xdp_xmit		= qede_xdp_transmit,
 671	.ndo_setup_tc		= qede_setup_tc_offload,
 672};
 673
 674static const struct net_device_ops qede_netdev_vf_ops = {
 675	.ndo_open		= qede_open,
 676	.ndo_stop		= qede_close,
 677	.ndo_start_xmit		= qede_start_xmit,
 678	.ndo_select_queue	= qede_select_queue,
 679	.ndo_set_rx_mode	= qede_set_rx_mode,
 680	.ndo_set_mac_address	= qede_set_mac_addr,
 681	.ndo_validate_addr	= eth_validate_addr,
 682	.ndo_change_mtu		= qede_change_mtu,
 683	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
 684	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
 685	.ndo_fix_features	= qede_fix_features,
 686	.ndo_set_features	= qede_set_features,
 687	.ndo_get_stats64	= qede_get_stats64,
 688	.ndo_features_check	= qede_features_check,
 689};
 690
 691static const struct net_device_ops qede_netdev_vf_xdp_ops = {
 692	.ndo_open		= qede_open,
 693	.ndo_stop		= qede_close,
 694	.ndo_start_xmit		= qede_start_xmit,
 695	.ndo_select_queue	= qede_select_queue,
 696	.ndo_set_rx_mode	= qede_set_rx_mode,
 697	.ndo_set_mac_address	= qede_set_mac_addr,
 698	.ndo_validate_addr	= eth_validate_addr,
 699	.ndo_change_mtu		= qede_change_mtu,
 700	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
 701	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
 702	.ndo_fix_features	= qede_fix_features,
 703	.ndo_set_features	= qede_set_features,
 704	.ndo_get_stats64	= qede_get_stats64,
 705	.ndo_features_check	= qede_features_check,
 706	.ndo_bpf		= qede_xdp,
 707	.ndo_xdp_xmit		= qede_xdp_transmit,
 708};
 709
 710/* -------------------------------------------------------------------------
 711 * START OF PROBE / REMOVE
 712 * -------------------------------------------------------------------------
 713 */
 714
 715static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
 716					    struct pci_dev *pdev,
 717					    struct qed_dev_eth_info *info,
 718					    u32 dp_module, u8 dp_level)
 719{
 720	struct net_device *ndev;
 721	struct qede_dev *edev;
 722
 723	ndev = alloc_etherdev_mqs(sizeof(*edev),
 724				  info->num_queues * info->num_tc,
 725				  info->num_queues);
 726	if (!ndev) {
 727		pr_err("etherdev allocation failed\n");
 728		return NULL;
 729	}
 730
 731	edev = netdev_priv(ndev);
 732	edev->ndev = ndev;
 733	edev->cdev = cdev;
 734	edev->pdev = pdev;
 735	edev->dp_module = dp_module;
 736	edev->dp_level = dp_level;
 737	edev->ops = qed_ops;
 738
 739	if (is_kdump_kernel()) {
 740		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
 741		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
 742	} else {
 743		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
 744		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 745	}
 746
  747	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
  748		info->num_queues * info->num_tc, info->num_queues);
 749
 750	SET_NETDEV_DEV(ndev, &pdev->dev);
 751
 752	memset(&edev->stats, 0, sizeof(edev->stats));
 753	memcpy(&edev->dev_info, info, sizeof(*info));
 754
  755	/* As ethtool doesn't have the ability to show WoL behavior as
  756	 * 'default', declare WoL enabled if the device supports it.
  757	 */
 758	if (edev->dev_info.common.wol_support)
 759		edev->wol_enabled = true;
 760
 761	INIT_LIST_HEAD(&edev->vlan_list);
 762
 763	return edev;
 764}
 765
 766static void qede_init_ndev(struct qede_dev *edev)
 767{
 768	struct net_device *ndev = edev->ndev;
 769	struct pci_dev *pdev = edev->pdev;
 770	bool udp_tunnel_enable = false;
 771	netdev_features_t hw_features;
 772
 773	pci_set_drvdata(pdev, ndev);
 774
 775	ndev->mem_start = edev->dev_info.common.pci_mem_start;
 776	ndev->base_addr = ndev->mem_start;
 777	ndev->mem_end = edev->dev_info.common.pci_mem_end;
 778	ndev->irq = edev->dev_info.common.pci_irq;
 779
 780	ndev->watchdog_timeo = TX_TIMEOUT;
 781
 782	if (IS_VF(edev)) {
 783		if (edev->dev_info.xdp_supported)
 784			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
 785		else
 786			ndev->netdev_ops = &qede_netdev_vf_ops;
 787	} else {
 788		ndev->netdev_ops = &qede_netdev_ops;
 789	}
 790
 791	qede_set_ethtool_ops(ndev);
 792
 793	ndev->priv_flags |= IFF_UNICAST_FLT;
 794
  795	/* user-changeable features */
 796	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
 797		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 798		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
 799
 800	if (edev->dev_info.common.b_arfs_capable)
 801		hw_features |= NETIF_F_NTUPLE;
 802
 803	if (edev->dev_info.common.vxlan_enable ||
 804	    edev->dev_info.common.geneve_enable)
 805		udp_tunnel_enable = true;
 806
 807	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
 808		hw_features |= NETIF_F_TSO_ECN;
 809		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 810					NETIF_F_SG | NETIF_F_TSO |
 811					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 812					NETIF_F_RXCSUM;
 813	}
 814
 815	if (udp_tunnel_enable) {
 816		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
 817				NETIF_F_GSO_UDP_TUNNEL_CSUM);
 818		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
 819					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
 820
 821		qede_set_udp_tunnels(edev);
 822	}
 823
 824	if (edev->dev_info.common.gre_enable) {
 825		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
 826		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
 827					  NETIF_F_GSO_GRE_CSUM);
 828	}
 829
 830	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 831			      NETIF_F_HIGHDMA;
 832	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 833			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
 834			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
 835
 836	ndev->hw_features = hw_features;
 837
 838	/* MTU range: 46 - 9600 */
 839	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
 840	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
 841
 842	/* Set network device HW mac */
 843	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
 844
 845	ndev->mtu = edev->dev_info.common.mtu;
 846}
 847
  848/* This function converts the 32b debug param into two params: level and
  849 * module. Input 32b decoding:
  850 * b31 - enable all NOTICE prints. NOTICE prints are for deviations from
  851 * the 'happy' flow, e.g. memory allocation failed.
  852 * b30 - enable all INFO prints. INFO prints are for major steps in the
  853 * flow and provide important parameters.
  854 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of
  855 * that module. VERBOSE prints are for tracking a specific flow at a low
  856 * level.
  857 * Notice that the level should be that of the lowest required logs.
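 *
 * Illustrative examples (assuming the qed debug masks follow the layout
 * above): debug=0x40000000 selects the INFO level only, while debug=0x3
 * selects the VERBOSE level with per-module bits 0-1 set, matching the
 * (debug & 0x3FFFFFFF) extraction below.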
 858 */
 859void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
 860{
 861	*p_dp_level = QED_LEVEL_NOTICE;
 862	*p_dp_module = 0;
 863
 864	if (debug & QED_LOG_VERBOSE_MASK) {
 865		*p_dp_level = QED_LEVEL_VERBOSE;
 866		*p_dp_module = (debug & 0x3FFFFFFF);
 867	} else if (debug & QED_LOG_INFO_MASK) {
 868		*p_dp_level = QED_LEVEL_INFO;
 869	} else if (debug & QED_LOG_NOTICE_MASK) {
 870		*p_dp_level = QED_LEVEL_NOTICE;
 871	}
 872}
 873
 874static void qede_free_fp_array(struct qede_dev *edev)
 875{
 876	if (edev->fp_array) {
 877		struct qede_fastpath *fp;
 878		int i;
 879
 880		for_each_queue(i) {
 881			fp = &edev->fp_array[i];
 882
 883			kfree(fp->sb_info);
  884			/* Handle the mem alloc failure case where qede_init_fp
  885			 * didn't register xdp_rxq_info yet. The
  886			 * (fp->type & QEDE_FASTPATH_RX) check is implicit here.
  887			 */
 888			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
 889				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
 890			kfree(fp->rxq);
 891			kfree(fp->xdp_tx);
 892			kfree(fp->txq);
 893		}
 894		kfree(edev->fp_array);
 895	}
 896
 897	edev->num_queues = 0;
 898	edev->fp_num_tx = 0;
 899	edev->fp_num_rx = 0;
 900}
 901
 902static int qede_alloc_fp_array(struct qede_dev *edev)
 903{
 904	u8 fp_combined, fp_rx = edev->fp_num_rx;
 905	struct qede_fastpath *fp;
 906	void *mem;
 907	int i;
 908
 909	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
 910				 sizeof(*edev->fp_array), GFP_KERNEL);
 911	if (!edev->fp_array) {
 912		DP_NOTICE(edev, "fp array allocation failed\n");
 913		goto err;
 914	}
 915
 916	mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
 917		       sizeof(*edev->coal_entry), GFP_KERNEL);
 918	if (!mem) {
 919		DP_ERR(edev, "coalesce entry allocation failed\n");
 920		kfree(edev->coal_entry);
 921		goto err;
 922	}
 923	edev->coal_entry = mem;
 924
 925	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
 926
 927	/* Allocate the FP elements for Rx queues followed by combined and then
 928	 * the Tx. This ordering should be maintained so that the respective
 929	 * queues (Rx or Tx) will be together in the fastpath array and the
 930	 * associated ids will be sequential.
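	 * For example (illustrative): with two Rx-only, two combined and two
	 * Tx-only fastpaths the array lays out as
	 * [RX, RX, COMBINED, COMBINED, TX, TX].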
 931	 */
 932	for_each_queue(i) {
 933		fp = &edev->fp_array[i];
 934
 935		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
 936		if (!fp->sb_info) {
 937			DP_NOTICE(edev, "sb info struct allocation failed\n");
 938			goto err;
 939		}
 940
 941		if (fp_rx) {
 942			fp->type = QEDE_FASTPATH_RX;
 943			fp_rx--;
 944		} else if (fp_combined) {
 945			fp->type = QEDE_FASTPATH_COMBINED;
 946			fp_combined--;
 947		} else {
 948			fp->type = QEDE_FASTPATH_TX;
 949		}
 950
 951		if (fp->type & QEDE_FASTPATH_TX) {
 952			fp->txq = kcalloc(edev->dev_info.num_tc,
 953					  sizeof(*fp->txq), GFP_KERNEL);
 954			if (!fp->txq)
 955				goto err;
 956		}
 957
 958		if (fp->type & QEDE_FASTPATH_RX) {
 959			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
 960			if (!fp->rxq)
 961				goto err;
 962
 963			if (edev->xdp_prog) {
 964				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
 965						     GFP_KERNEL);
 966				if (!fp->xdp_tx)
 967					goto err;
 968				fp->type |= QEDE_FASTPATH_XDP;
 969			}
 970		}
 971	}
 972
 973	return 0;
 974err:
 975	qede_free_fp_array(edev);
 976	return -ENOMEM;
 977}
 978
 979/* The qede lock is used to protect driver state change and driver flows that
 980 * are not reentrant.
 981 */
 982void __qede_lock(struct qede_dev *edev)
 983{
 984	mutex_lock(&edev->qede_lock);
 985}
 986
 987void __qede_unlock(struct qede_dev *edev)
 988{
 989	mutex_unlock(&edev->qede_lock);
 990}
 991
 992/* This version of the lock should be used when acquiring the RTNL lock is also
 993 * needed in addition to the internal qede lock.
 994 */
 995static void qede_lock(struct qede_dev *edev)
 996{
 997	rtnl_lock();
 998	__qede_lock(edev);
 999}
1000
1001static void qede_unlock(struct qede_dev *edev)
1002{
1003	__qede_unlock(edev);
1004	rtnl_unlock();
1005}
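
/* Lock ordering note: qede_lock() takes the RTNL lock before the internal
 * qede lock and qede_unlock() releases them in reverse order, so callers
 * must not already hold the RTNL lock.
 */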
1006
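/* Deferred slowpath task: handles recovery, Rx-mode configuration, aRFS
 * maintenance, generic HW errors and AER-driven recovery outside of atomic
 * context, as selected by the bits set in edev->sp_flags.
 */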
1007static void qede_sp_task(struct work_struct *work)
1008{
1009	struct qede_dev *edev = container_of(work, struct qede_dev,
1010					     sp_task.work);
1011
 1012	/* Disable execution of this deferred work once
 1013	 * qede removal is in progress; this stops any future
 1014	 * scheduling of sp_task.
 1015	 */
1016	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
1017		return;
1018
1019	/* The locking scheme depends on the specific flag:
1020	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
1021	 * ensure that ongoing flows are ended and new ones are not started.
1022	 * In other cases - only the internal qede lock should be acquired.
1023	 */
1024
1025	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
1026#ifdef CONFIG_QED_SRIOV
1027		/* SRIOV must be disabled outside the lock to avoid a deadlock.
1028		 * The recovery of the active VFs is currently not supported.
1029		 */
1030		if (pci_num_vf(edev->pdev))
1031			qede_sriov_configure(edev->pdev, 0);
1032#endif
1033		qede_lock(edev);
1034		qede_recovery_handler(edev);
1035		qede_unlock(edev);
1036	}
1037
1038	__qede_lock(edev);
1039
1040	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
1041		if (edev->state == QEDE_STATE_OPEN)
1042			qede_config_rx_mode(edev->ndev);
1043
1044#ifdef CONFIG_RFS_ACCEL
1045	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
1046		if (edev->state == QEDE_STATE_OPEN)
1047			qede_process_arfs_filters(edev, false);
1048	}
1049#endif
1050	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
1051		qede_generic_hw_err_handler(edev);
1052	__qede_unlock(edev);
1053
1054	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
1055#ifdef CONFIG_QED_SRIOV
1056		/* SRIOV must be disabled outside the lock to avoid a deadlock.
1057		 * The recovery of the active VFs is currently not supported.
1058		 */
1059		if (pci_num_vf(edev->pdev))
1060			qede_sriov_configure(edev->pdev, 0);
1061#endif
1062		edev->ops->common->recovery_process(edev->cdev);
1063	}
1064}
1065
1066static void qede_update_pf_params(struct qed_dev *cdev)
1067{
1068	struct qed_pf_params pf_params;
1069	u16 num_cons;
1070
1071	/* 64 rx + 64 tx + 64 XDP */
1072	memset(&pf_params, 0, sizeof(struct qed_pf_params));
1073
1074	/* 1 rx + 1 xdp + max tx cos */
1075	num_cons = QED_MIN_L2_CONS;
1076
1077	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
1078
1079	/* Same for VFs - make sure they'll have sufficient connections
1080	 * to support XDP Tx queues.
1081	 */
1082	pf_params.eth_pf_params.num_vf_cons = 48;
1083
1084	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
1085	qed_ops->common->update_pf_params(cdev, &pf_params);
1086}
1087
1088#define QEDE_FW_VER_STR_SIZE	80
1089
1090static void qede_log_probe(struct qede_dev *edev)
1091{
1092	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
1093	u8 buf[QEDE_FW_VER_STR_SIZE];
1094	size_t left_size;
1095
1096	snprintf(buf, QEDE_FW_VER_STR_SIZE,
1097		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
1098		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
1099		 p_dev_info->fw_eng,
1100		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
1101		 QED_MFW_VERSION_3_OFFSET,
1102		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
1103		 QED_MFW_VERSION_2_OFFSET,
1104		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
1105		 QED_MFW_VERSION_1_OFFSET,
1106		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
1107		 QED_MFW_VERSION_0_OFFSET);
1108
1109	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
1110	if (p_dev_info->mbi_version && left_size)
1111		snprintf(buf + strlen(buf), left_size,
1112			 " [MBI %d.%d.%d]",
1113			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
1114			 QED_MBI_VERSION_2_OFFSET,
1115			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
1116			 QED_MBI_VERSION_1_OFFSET,
1117			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
1118			 QED_MBI_VERSION_0_OFFSET);
1119
1120	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
1121		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
1122		buf, edev->ndev->name);
1123}
1124
1125enum qede_probe_mode {
1126	QEDE_PROBE_NORMAL,
1127	QEDE_PROBE_RECOVERY,
1128};
1129
1130static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
1131			bool is_vf, enum qede_probe_mode mode)
1132{
1133	struct qed_probe_params probe_params;
1134	struct qed_slowpath_params sp_params;
1135	struct qed_dev_eth_info dev_info;
1136	struct qede_dev *edev;
1137	struct qed_dev *cdev;
1138	int rc;
1139
1140	if (unlikely(dp_level & QED_LEVEL_INFO))
1141		pr_notice("Starting qede probe\n");
1142
1143	memset(&probe_params, 0, sizeof(probe_params));
1144	probe_params.protocol = QED_PROTOCOL_ETH;
1145	probe_params.dp_module = dp_module;
1146	probe_params.dp_level = dp_level;
1147	probe_params.is_vf = is_vf;
1148	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
1149	cdev = qed_ops->common->probe(pdev, &probe_params);
1150	if (!cdev) {
1151		rc = -ENODEV;
1152		goto err0;
1153	}
1154
1155	qede_update_pf_params(cdev);
1156
1157	/* Start the Slowpath-process */
1158	memset(&sp_params, 0, sizeof(sp_params));
1159	sp_params.int_mode = QED_INT_MODE_MSIX;
1160	sp_params.drv_major = QEDE_MAJOR_VERSION;
1161	sp_params.drv_minor = QEDE_MINOR_VERSION;
1162	sp_params.drv_rev = QEDE_REVISION_VERSION;
1163	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
1164	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
1165	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
1166	if (rc) {
1167		pr_notice("Cannot start slowpath\n");
1168		goto err1;
1169	}
1170
1171	/* Learn information crucial for qede to progress */
1172	rc = qed_ops->fill_dev_info(cdev, &dev_info);
1173	if (rc)
1174		goto err2;
1175
1176	if (mode != QEDE_PROBE_RECOVERY) {
1177		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
1178					   dp_level);
1179		if (!edev) {
1180			rc = -ENOMEM;
1181			goto err2;
1182		}
1183
1184		edev->devlink = qed_ops->common->devlink_register(cdev);
1185		if (IS_ERR(edev->devlink)) {
1186			DP_NOTICE(edev, "Cannot register devlink\n");
1187			edev->devlink = NULL;
1188			/* Go on, we can live without devlink */
1189		}
1190	} else {
1191		struct net_device *ndev = pci_get_drvdata(pdev);
1192
1193		edev = netdev_priv(ndev);
1194
1195		if (edev->devlink) {
1196			struct qed_devlink *qdl = devlink_priv(edev->devlink);
1197
1198			qdl->cdev = cdev;
1199		}
1200		edev->cdev = cdev;
1201		memset(&edev->stats, 0, sizeof(edev->stats));
1202		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
1203	}
1204
1205	if (is_vf)
1206		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
1207
1208	qede_init_ndev(edev);
1209
1210	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
1211	if (rc)
1212		goto err3;
1213
1214	if (mode != QEDE_PROBE_RECOVERY) {
1215		/* Prepare the lock prior to the registration of the netdev,
1216		 * as once it's registered we might reach flows requiring it
1217		 * [it's even possible to reach a flow needing it directly
1218		 * from there, although it's unlikely].
1219		 */
1220		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
1221		mutex_init(&edev->qede_lock);
1222
1223		rc = register_netdev(edev->ndev);
1224		if (rc) {
1225			DP_NOTICE(edev, "Cannot register net-device\n");
1226			goto err4;
1227		}
1228	}
1229
1230	edev->ops->common->set_name(cdev, edev->ndev->name);
1231
1232	/* PTP not supported on VFs */
1233	if (!is_vf)
1234		qede_ptp_enable(edev);
1235
1236	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1237
1238#ifdef CONFIG_DCB
1239	if (!IS_VF(edev))
1240		qede_set_dcbnl_ops(edev->ndev);
1241#endif
1242
1243	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
1244
1245	qede_log_probe(edev);
1246	return 0;
1247
1248err4:
1249	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
1250err3:
1251	if (mode != QEDE_PROBE_RECOVERY)
1252		free_netdev(edev->ndev);
1253	else
1254		edev->cdev = NULL;
1255err2:
1256	qed_ops->common->slowpath_stop(cdev);
1257err1:
1258	qed_ops->common->remove(cdev);
1259err0:
1260	return rc;
1261}
1262
1263static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1264{
1265	bool is_vf = false;
1266	u32 dp_module = 0;
1267	u8 dp_level = 0;
1268
1269	switch ((enum qede_pci_private)id->driver_data) {
1270	case QEDE_PRIVATE_VF:
1271		if (debug & QED_LOG_VERBOSE_MASK)
1272			dev_err(&pdev->dev, "Probing a VF\n");
1273		is_vf = true;
1274		break;
1275	default:
1276		if (debug & QED_LOG_VERBOSE_MASK)
1277			dev_err(&pdev->dev, "Probing a PF\n");
1278	}
1279
1280	qede_config_debug(debug, &dp_module, &dp_level);
1281
1282	return __qede_probe(pdev, dp_module, dp_level, is_vf,
1283			    QEDE_PROBE_NORMAL);
1284}
1285
1286enum qede_remove_mode {
1287	QEDE_REMOVE_NORMAL,
1288	QEDE_REMOVE_RECOVERY,
1289};
1290
1291static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1292{
1293	struct net_device *ndev = pci_get_drvdata(pdev);
1294	struct qede_dev *edev;
1295	struct qed_dev *cdev;
1296
1297	if (!ndev) {
1298		dev_info(&pdev->dev, "Device has already been removed\n");
1299		return;
1300	}
1301
1302	edev = netdev_priv(ndev);
1303	cdev = edev->cdev;
1304
1305	DP_INFO(edev, "Starting qede_remove\n");
1306
1307	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
1308
1309	if (mode != QEDE_REMOVE_RECOVERY) {
1310		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
1311		unregister_netdev(ndev);
1312
1313		cancel_delayed_work_sync(&edev->sp_task);
1314
1315		edev->ops->common->set_power_state(cdev, PCI_D0);
1316
1317		pci_set_drvdata(pdev, NULL);
1318	}
1319
1320	qede_ptp_disable(edev);
1321
1322	/* Use global ops since we've freed edev */
1323	qed_ops->common->slowpath_stop(cdev);
1324	if (system_state == SYSTEM_POWER_OFF)
1325		return;
1326
1327	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
1328		qed_ops->common->devlink_unregister(edev->devlink);
1329		edev->devlink = NULL;
1330	}
1331	qed_ops->common->remove(cdev);
1332	edev->cdev = NULL;
1333
1334	/* Since this can happen out-of-sync with other flows,
1335	 * don't release the netdevice until after slowpath stop
1336	 * has been called to guarantee various other contexts
1337	 * [e.g., QED register callbacks] won't break anything when
1338	 * accessing the netdevice.
1339	 */
1340	if (mode != QEDE_REMOVE_RECOVERY) {
1341		kfree(edev->coal_entry);
1342		free_netdev(ndev);
1343	}
1344
1345	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1346}
1347
1348static void qede_remove(struct pci_dev *pdev)
1349{
1350	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1351}
1352
1353static void qede_shutdown(struct pci_dev *pdev)
1354{
1355	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1356}
1357
1358/* -------------------------------------------------------------------------
1359 * START OF LOAD / UNLOAD
1360 * -------------------------------------------------------------------------
1361 */
1362
1363static int qede_set_num_queues(struct qede_dev *edev)
1364{
1365	int rc;
1366	u16 rss_num;
1367
 1368	/* Set up queues according to possible resources */
1369	if (edev->req_queues)
1370		rss_num = edev->req_queues;
1371	else
1372		rss_num = netif_get_num_default_rss_queues() *
1373			  edev->dev_info.common.num_hwfns;
1374
1375	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1376
1377	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1378	if (rc > 0) {
1379		/* Managed to request interrupts for our queues */
1380		edev->num_queues = rc;
1381		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1382			QEDE_QUEUE_CNT(edev), rss_num);
1383		rc = 0;
1384	}
1385
1386	edev->fp_num_tx = edev->req_num_tx;
1387	edev->fp_num_rx = edev->req_num_rx;
1388
1389	return rc;
1390}
1391
1392static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
1393			     u16 sb_id)
1394{
1395	if (sb_info->sb_virt) {
1396		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
1397					      QED_SB_TYPE_L2_QUEUE);
1398		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1399				  (void *)sb_info->sb_virt, sb_info->sb_phys);
1400		memset(sb_info, 0, sizeof(*sb_info));
1401	}
1402}
1403
1404/* This function allocates fast-path status block memory */
1405static int qede_alloc_mem_sb(struct qede_dev *edev,
1406			     struct qed_sb_info *sb_info, u16 sb_id)
1407{
1408	struct status_block_e4 *sb_virt;
1409	dma_addr_t sb_phys;
1410	int rc;
1411
1412	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1413				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
1414	if (!sb_virt) {
1415		DP_ERR(edev, "Status block allocation failed\n");
1416		return -ENOMEM;
1417	}
1418
1419	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1420					sb_virt, sb_phys, sb_id,
1421					QED_SB_TYPE_L2_QUEUE);
1422	if (rc) {
1423		DP_ERR(edev, "Status block initialization failed\n");
1424		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1425				  sb_virt, sb_phys);
1426		return rc;
1427	}
1428
1429	return 0;
1430}
1431
1432static void qede_free_rx_buffers(struct qede_dev *edev,
1433				 struct qede_rx_queue *rxq)
1434{
1435	u16 i;
1436
1437	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1438		struct sw_rx_data *rx_buf;
1439		struct page *data;
1440
1441		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1442		data = rx_buf->data;
1443
1444		dma_unmap_page(&edev->pdev->dev,
1445			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1446
1447		rx_buf->data = NULL;
1448		__free_page(data);
1449	}
1450}
1451
1452static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1453{
1454	/* Free rx buffers */
1455	qede_free_rx_buffers(edev, rxq);
1456
1457	/* Free the parallel SW ring */
1458	kfree(rxq->sw_rx_ring);
1459
1460	/* Free the real RQ ring used by FW */
1461	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1462	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1463}
1464
1465static void qede_set_tpa_param(struct qede_rx_queue *rxq)
1466{
1467	int i;
1468
1469	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1470		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1471
1472		tpa_info->state = QEDE_AGG_STATE_NONE;
1473	}
1474}
1475
1476/* This function allocates all memory needed per Rx queue */
1477static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1478{
1479	struct qed_chain_init_params params = {
1480		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1481		.num_elems	= RX_RING_SIZE,
1482	};
1483	struct qed_dev *cdev = edev->cdev;
1484	int i, rc, size;
1485
1486	rxq->num_rx_buffers = edev->q_num_rx_buffers;
1487
1488	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1489
1490	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
1491	size = rxq->rx_headroom +
1492	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1493
 1494	/* Make sure that the headroom and payload fit in a single page */
1495	if (rxq->rx_buf_size + size > PAGE_SIZE)
1496		rxq->rx_buf_size = PAGE_SIZE - size;
1497
 1498	/* Segment size to split a page into multiple equal parts,
 1499	 * unless XDP is used, in which case we use the entire page.
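	 * (Illustrative: with 4K pages and a 1500-byte MTU the segment size
	 * typically rounds up to 2K, i.e. two Rx buffers per page.)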
1500	 */
1501	if (!edev->xdp_prog) {
1502		size = size + rxq->rx_buf_size;
1503		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
1504	} else {
1505		rxq->rx_buf_seg_size = PAGE_SIZE;
1506		edev->ndev->features &= ~NETIF_F_GRO_HW;
1507	}
1508
1509	/* Allocate the parallel driver ring for Rx buffers */
1510	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1511	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1512	if (!rxq->sw_rx_ring) {
1513		DP_ERR(edev, "Rx buffers ring allocation failed\n");
1514		rc = -ENOMEM;
1515		goto err;
1516	}
1517
 1518	/* Allocate FW Rx ring */
1519	params.mode = QED_CHAIN_MODE_NEXT_PTR;
1520	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
1521	params.elem_size = sizeof(struct eth_rx_bd);
1522
1523	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
1524	if (rc)
1525		goto err;
1526
1527	/* Allocate FW completion ring */
1528	params.mode = QED_CHAIN_MODE_PBL;
1529	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
1530	params.elem_size = sizeof(union eth_rx_cqe);
1531
1532	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
1533	if (rc)
1534		goto err;
1535
1536	/* Allocate buffers for the Rx ring */
1537	rxq->filled_buffers = 0;
1538	for (i = 0; i < rxq->num_rx_buffers; i++) {
1539		rc = qede_alloc_rx_buffer(rxq, false);
1540		if (rc) {
1541			DP_ERR(edev,
1542			       "Rx buffers allocation failed at index %d\n", i);
1543			goto err;
1544		}
1545	}
1546
1547	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
1548	if (!edev->gro_disable)
1549		qede_set_tpa_param(rxq);
1550err:
1551	return rc;
1552}
1553
1554static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1555{
1556	/* Free the parallel SW ring */
1557	if (txq->is_xdp)
1558		kfree(txq->sw_tx_ring.xdp);
1559	else
1560		kfree(txq->sw_tx_ring.skbs);
1561
 1562	/* Free the real Tx ring used by FW */
1563	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1564}
1565
1566/* This function allocates all memory needed per Tx queue */
1567static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1568{
1569	struct qed_chain_init_params params = {
1570		.mode		= QED_CHAIN_MODE_PBL,
1571		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1572		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1573		.num_elems	= edev->q_num_tx_buffers,
1574		.elem_size	= sizeof(union eth_tx_bd_types),
1575	};
1576	int size, rc;
1577
1578	txq->num_tx_buffers = edev->q_num_tx_buffers;
1579
1580	/* Allocate the parallel driver ring for Tx buffers */
1581	if (txq->is_xdp) {
1582		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
1583		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1584		if (!txq->sw_tx_ring.xdp)
1585			goto err;
1586	} else {
1587		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1588		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1589		if (!txq->sw_tx_ring.skbs)
1590			goto err;
1591	}
1592
1593	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
1594	if (rc)
1595		goto err;
1596
1597	return 0;
1598
1599err:
1600	qede_free_mem_txq(edev, txq);
1601	return -ENOMEM;
1602}
1603
1604/* This function frees all memory of a single fp */
1605static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1606{
1607	qede_free_mem_sb(edev, fp->sb_info, fp->id);
1608
1609	if (fp->type & QEDE_FASTPATH_RX)
1610		qede_free_mem_rxq(edev, fp->rxq);
1611
1612	if (fp->type & QEDE_FASTPATH_XDP)
1613		qede_free_mem_txq(edev, fp->xdp_tx);
1614
1615	if (fp->type & QEDE_FASTPATH_TX) {
1616		int cos;
1617
1618		for_each_cos_in_txq(edev, cos)
1619			qede_free_mem_txq(edev, &fp->txq[cos]);
1620	}
1621}
1622
1623/* This function allocates all memory needed for a single fp (i.e. an entity
 1624 * which contains a status block, one Rx queue and/or multiple per-TC Tx queues).
1625 */
1626static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1627{
1628	int rc = 0;
1629
1630	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1631	if (rc)
1632		goto out;
1633
1634	if (fp->type & QEDE_FASTPATH_RX) {
1635		rc = qede_alloc_mem_rxq(edev, fp->rxq);
1636		if (rc)
1637			goto out;
1638	}
1639
1640	if (fp->type & QEDE_FASTPATH_XDP) {
1641		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1642		if (rc)
1643			goto out;
1644	}
1645
1646	if (fp->type & QEDE_FASTPATH_TX) {
1647		int cos;
1648
1649		for_each_cos_in_txq(edev, cos) {
1650			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
1651			if (rc)
1652				goto out;
1653		}
1654	}
1655
1656out:
1657	return rc;
1658}
1659
1660static void qede_free_mem_load(struct qede_dev *edev)
1661{
1662	int i;
1663
1664	for_each_queue(i) {
1665		struct qede_fastpath *fp = &edev->fp_array[i];
1666
1667		qede_free_mem_fp(edev, fp);
1668	}
1669}
1670
1671/* This function allocates all qede memory at NIC load. */
1672static int qede_alloc_mem_load(struct qede_dev *edev)
1673{
1674	int rc = 0, queue_id;
1675
1676	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1677		struct qede_fastpath *fp = &edev->fp_array[queue_id];
1678
1679		rc = qede_alloc_mem_fp(edev, fp);
1680		if (rc) {
1681			DP_ERR(edev,
1682			       "Failed to allocate memory for fastpath - rss id = %d\n",
1683			       queue_id);
1684			qede_free_mem_load(edev);
1685			return rc;
1686		}
1687	}
1688
1689	return 0;
1690}
1691
1692static void qede_empty_tx_queue(struct qede_dev *edev,
1693				struct qede_tx_queue *txq)
1694{
1695	unsigned int pkts_compl = 0, bytes_compl = 0;
1696	struct netdev_queue *netdev_txq;
1697	int rc, len = 0;
1698
1699	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
1700
1701	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
1702	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
1703		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1704			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1705			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
1706			   qed_chain_get_prod_idx(&txq->tx_pbl));
1707
1708		rc = qede_free_tx_pkt(edev, txq, &len);
1709		if (rc) {
1710			DP_NOTICE(edev,
1711				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1712				  txq->index,
1713				  qed_chain_get_cons_idx(&txq->tx_pbl),
1714				  qed_chain_get_prod_idx(&txq->tx_pbl));
1715			break;
1716		}
1717
1718		bytes_compl += len;
1719		pkts_compl++;
1720		txq->sw_tx_cons++;
1721	}
1722
1723	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
1724}
1725
1726static void qede_empty_tx_queues(struct qede_dev *edev)
1727{
1728	int i;
1729
1730	for_each_queue(i)
1731		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
1732			int cos;
1733
1734			for_each_cos_in_txq(edev, cos) {
1735				struct qede_fastpath *fp;
1736
1737				fp = &edev->fp_array[i];
1738				qede_empty_tx_queue(edev,
1739						    &fp->txq[cos]);
1740			}
1741		}
1742}
1743
1744/* This function inits fp content and resets the SB, RXQ and TXQ structures */
1745static void qede_init_fp(struct qede_dev *edev)
1746{
1747	int queue_id, rxq_index = 0, txq_index = 0;
1748	struct qede_fastpath *fp;
1749	bool init_xdp = false;
1750
1751	for_each_queue(queue_id) {
1752		fp = &edev->fp_array[queue_id];
1753
1754		fp->edev = edev;
1755		fp->id = queue_id;
1756
1757		if (fp->type & QEDE_FASTPATH_XDP) {
1758			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1759								rxq_index);
1760			fp->xdp_tx->is_xdp = 1;
1761
1762			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
1763			init_xdp = true;
1764		}
1765
1766		if (fp->type & QEDE_FASTPATH_RX) {
1767			fp->rxq->rxq_id = rxq_index++;
1768
1769			/* Determine how to map buffers for this queue */
1770			if (fp->type & QEDE_FASTPATH_XDP)
1771				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1772			else
1773				fp->rxq->data_direction = DMA_FROM_DEVICE;
1774			fp->rxq->dev = &edev->pdev->dev;
1775
 1776			/* Driver has no error path from here */
1777			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1778						 fp->rxq->rxq_id, 0) < 0);
1779
1780			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
1781						       MEM_TYPE_PAGE_ORDER0,
1782						       NULL)) {
1783				DP_NOTICE(edev,
1784					  "Failed to register XDP memory model\n");
1785			}
1786		}
1787
1788		if (fp->type & QEDE_FASTPATH_TX) {
1789			int cos;
1790
1791			for_each_cos_in_txq(edev, cos) {
1792				struct qede_tx_queue *txq = &fp->txq[cos];
1793				u16 ndev_tx_id;
1794
1795				txq->cos = cos;
1796				txq->index = txq_index;
1797				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
1798				txq->ndev_txq_id = ndev_tx_id;
1799
1800				if (edev->dev_info.is_legacy)
1801					txq->is_legacy = true;
1802				txq->dev = &edev->pdev->dev;
1803			}
1804
1805			txq_index++;
1806		}
1807
1808		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1809			 edev->ndev->name, queue_id);
1810	}
1811
1812	if (init_xdp) {
1813		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
1814		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
1815	}
1816}
1817
1818static int qede_set_real_num_queues(struct qede_dev *edev)
1819{
1820	int rc = 0;
1821
1822	rc = netif_set_real_num_tx_queues(edev->ndev,
1823					  QEDE_TSS_COUNT(edev) *
1824					  edev->dev_info.num_tc);
1825	if (rc) {
1826		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1827		return rc;
1828	}
1829
1830	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1831	if (rc) {
1832		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1833		return rc;
1834	}
1835
1836	return 0;
1837}
1838
1839static void qede_napi_disable_remove(struct qede_dev *edev)
1840{
1841	int i;
1842
1843	for_each_queue(i) {
1844		napi_disable(&edev->fp_array[i].napi);
1845
1846		netif_napi_del(&edev->fp_array[i].napi);
1847	}
1848}
1849
1850static void qede_napi_add_enable(struct qede_dev *edev)
1851{
1852	int i;
1853
1854	/* Add NAPI objects */
1855	for_each_queue(i) {
1856		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1857			       qede_poll, NAPI_POLL_WEIGHT);
1858		napi_enable(&edev->fp_array[i].napi);
1859	}
1860}
1861
1862static void qede_sync_free_irqs(struct qede_dev *edev)
1863{
1864	int i;
1865
1866	for (i = 0; i < edev->int_info.used_cnt; i++) {
1867		if (edev->int_info.msix_cnt) {
1868			synchronize_irq(edev->int_info.msix[i].vector);
1869			free_irq(edev->int_info.msix[i].vector,
1870				 &edev->fp_array[i]);
1871		} else {
1872			edev->ops->common->simd_handler_clean(edev->cdev, i);
1873		}
1874	}
1875
1876	edev->int_info.used_cnt = 0;
1877	edev->int_info.msix_cnt = 0;
1878}
1879
1880static int qede_req_msix_irqs(struct qede_dev *edev)
1881{
1882	int i, rc;
1883
 1884	/* Sanity check: the prepared RSS queues must not exceed the MSI-X vectors */
1885	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1886		DP_ERR(edev,
1887		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1888		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1889		return -EINVAL;
1890	}
1891
1892	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1893#ifdef CONFIG_RFS_ACCEL
1894		struct qede_fastpath *fp = &edev->fp_array[i];
1895
1896		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1897			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1898					      edev->int_info.msix[i].vector);
1899			if (rc) {
1900				DP_ERR(edev, "Failed to add CPU rmap\n");
1901				qede_free_arfs(edev);
1902			}
1903		}
1904#endif
1905		rc = request_irq(edev->int_info.msix[i].vector,
1906				 qede_msix_fp_int, 0, edev->fp_array[i].name,
1907				 &edev->fp_array[i]);
1908		if (rc) {
1909			DP_ERR(edev, "Request fp %d irq failed\n", i);
1910			qede_sync_free_irqs(edev);
1911			return rc;
1912		}
1913		DP_VERBOSE(edev, NETIF_MSG_INTR,
1914			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1915			   edev->fp_array[i].name, i,
1916			   &edev->fp_array[i]);
1917		edev->int_info.used_cnt++;
1918	}
1919
1920	return 0;
1921}
1922
1923static void qede_simd_fp_handler(void *cookie)
1924{
1925	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1926
1927	napi_schedule_irqoff(&fp->napi);
1928}
1929
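/* IRQ setup: with MSI-X, each fastpath gets its own vector via
 * qede_req_msix_irqs(); otherwise qed is handed a SIMD handler per queue
 * and schedules NAPI from its own interrupt context.
 */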
1930static int qede_setup_irqs(struct qede_dev *edev)
1931{
1932	int i, rc = 0;
1933
1934	/* Learn Interrupt configuration */
1935	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1936	if (rc)
1937		return rc;
1938
1939	if (edev->int_info.msix_cnt) {
1940		rc = qede_req_msix_irqs(edev);
1941		if (rc)
1942			return rc;
1943		edev->ndev->irq = edev->int_info.msix[0].vector;
1944	} else {
1945		const struct qed_common_ops *ops;
1946
 1947		/* qed should receive the RSS ids and callbacks */
1948		ops = edev->ops->common;
1949		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
1950			ops->simd_handler_config(edev->cdev,
1951						 &edev->fp_array[i], i,
1952						 qede_simd_fp_handler);
1953		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
1954	}
1955	return 0;
1956}
1957
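/* Wait for a Tx queue to empty: poll for up to ~1-2s (1000 iterations of
 * 1-2ms each); if the queue is still stuck and allow_drain is set, request
 * a drain from the MCP once and retry the wait with allow_drain cleared.
 */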
1958static int qede_drain_txq(struct qede_dev *edev,
1959			  struct qede_tx_queue *txq, bool allow_drain)
1960{
1961	int rc, cnt = 1000;
1962
1963	while (txq->sw_tx_cons != txq->sw_tx_prod) {
1964		if (!cnt) {
1965			if (allow_drain) {
1966				DP_NOTICE(edev,
1967					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
1968					  txq->index);
1969				rc = edev->ops->common->drain(edev->cdev);
1970				if (rc)
1971					return rc;
1972				return qede_drain_txq(edev, txq, false);
1973			}
1974			DP_NOTICE(edev,
1975				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1976				  txq->index, txq->sw_tx_prod,
1977				  txq->sw_tx_cons);
1978			return -ENODEV;
1979		}
1980		cnt--;
1981		usleep_range(1000, 2000);
1982		barrier();
1983	}
1984
1985	/* FW finished processing, wait for HW to transmit all tx packets */
1986	usleep_range(1000, 2000);
1987
1988	return 0;
1989}
1990
1991static int qede_stop_txq(struct qede_dev *edev,
1992			 struct qede_tx_queue *txq, int rss_id)
1993{
1994	/* delete doorbell from doorbell recovery mechanism */
1995	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
1996					   &txq->tx_db);
1997
1998	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
1999}
2000
2001static int qede_stop_queues(struct qede_dev *edev)
2002{
2003	struct qed_update_vport_params *vport_update_params;
2004	struct qed_dev *cdev = edev->cdev;
2005	struct qede_fastpath *fp;
2006	int rc, i;
2007
2008	/* Disable the vport */
2009	vport_update_params = vzalloc(sizeof(*vport_update_params));
2010	if (!vport_update_params)
2011		return -ENOMEM;
2012
2013	vport_update_params->vport_id = 0;
2014	vport_update_params->update_vport_active_flg = 1;
2015	vport_update_params->vport_active_flg = 0;
2016	vport_update_params->update_rss_flg = 0;
2017
2018	rc = edev->ops->vport_update(cdev, vport_update_params);
2019	vfree(vport_update_params);
2020
2021	if (rc) {
2022		DP_ERR(edev, "Failed to update vport\n");
2023		return rc;
2024	}
2025
2026	/* Flush Tx queues. If needed, request drain from MCP */
2027	for_each_queue(i) {
2028		fp = &edev->fp_array[i];
2029
2030		if (fp->type & QEDE_FASTPATH_TX) {
2031			int cos;
2032
2033			for_each_cos_in_txq(edev, cos) {
2034				rc = qede_drain_txq(edev, &fp->txq[cos], true);
2035				if (rc)
2036					return rc;
2037			}
2038		}
2039
2040		if (fp->type & QEDE_FASTPATH_XDP) {
2041			rc = qede_drain_txq(edev, fp->xdp_tx, true);
2042			if (rc)
2043				return rc;
2044		}
2045	}
2046
2047	/* Stop all Queues in reverse order */
2048	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
2049		fp = &edev->fp_array[i];
2050
2051		/* Stop the Tx Queue(s) */
2052		if (fp->type & QEDE_FASTPATH_TX) {
2053			int cos;
2054
2055			for_each_cos_in_txq(edev, cos) {
2056				rc = qede_stop_txq(edev, &fp->txq[cos], i);
2057				if (rc)
2058					return rc;
2059			}
2060		}
2061
2062		/* Stop the Rx Queue */
2063		if (fp->type & QEDE_FASTPATH_RX) {
2064			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
2065			if (rc) {
2066				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
2067				return rc;
2068			}
2069		}
2070
2071		/* Stop the XDP forwarding queue */
2072		if (fp->type & QEDE_FASTPATH_XDP) {
2073			rc = qede_stop_txq(edev, fp->xdp_tx, i);
2074			if (rc)
2075				return rc;
2076
2077			bpf_prog_put(fp->rxq->xdp_prog);
2078		}
2079	}
2080
2081	/* Stop the vport */
2082	rc = edev->ops->vport_stop(cdev, 0);
2083	if (rc)
2084		DP_ERR(edev, "Failed to stop VPORT\n");
2085
2086	return rc;
2087}
2088
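/* Start a Tx queue in FW, latch the FW consumer pointer from the status
 * block, prepare the XCM doorbell data and register the doorbell with the
 * doorbell recovery mechanism.
 */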
2089static int qede_start_txq(struct qede_dev *edev,
2090			  struct qede_fastpath *fp,
2091			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
2092{
2093	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
2094	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
2095	struct qed_queue_start_common_params params;
2096	struct qed_txq_start_ret_params ret_params;
2097	int rc;
2098
2099	memset(&params, 0, sizeof(params));
2100	memset(&ret_params, 0, sizeof(ret_params));
2101
2102	/* Let the XDP queue share the queue-zone with one of the regular txq.
2103	 * We don't really care about its coalescing.
2104	 */
2105	if (txq->is_xdp)
2106		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
2107	else
2108		params.queue_id = txq->index;
2109
2110	params.p_sb = fp->sb_info;
2111	params.sb_idx = sb_idx;
2112	params.tc = txq->cos;
2113
2114	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
2115				   page_cnt, &ret_params);
2116	if (rc) {
2117		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
2118		return rc;
2119	}
2120
2121	txq->doorbell_addr = ret_params.p_doorbell;
2122	txq->handle = ret_params.p_handle;
2123
 2124	/* Determine the associated FW consumer address */
2125	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
2126
2127	/* Prepare the doorbell parameters */
2128	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
2129	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
2130	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
2131		  DQ_XCM_ETH_TX_BD_PROD_CMD);
2132	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
2133
2134	/* register doorbell with doorbell recovery mechanism */
2135	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
2136						&txq->tx_db, DB_REC_WIDTH_32B,
2137						DB_REC_KERNEL);
2138
2139	return rc;
2140}
2141
2142static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
2143{
2144	int vlan_removal_en = 1;
2145	struct qed_dev *cdev = edev->cdev;
2146	struct qed_dev_info *qed_info = &edev->dev_info.common;
2147	struct qed_update_vport_params *vport_update_params;
2148	struct qed_queue_start_common_params q_params;
2149	struct qed_start_vport_params start = {0};
2150	int rc, i;
2151
2152	if (!edev->num_queues) {
2153		DP_ERR(edev,
2154		       "Cannot update V-VPORT as active as there are no Rx queues\n");
2155		return -EINVAL;
2156	}
2157
2158	vport_update_params = vzalloc(sizeof(*vport_update_params));
2159	if (!vport_update_params)
2160		return -ENOMEM;
2161
2162	start.handle_ptp_pkts = !!(edev->ptp);
2163	start.gro_enable = !edev->gro_disable;
2164	start.mtu = edev->ndev->mtu;
2165	start.vport_id = 0;
2166	start.drop_ttl0 = true;
2167	start.remove_inner_vlan = vlan_removal_en;
2168	start.clear_stats = clear_stats;
2169
2170	rc = edev->ops->vport_start(cdev, &start);
2171
2172	if (rc) {
2173		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
2174		goto out;
2175	}
2176
2177	DP_VERBOSE(edev, NETIF_MSG_IFUP,
2178		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2179		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
2180
2181	for_each_queue(i) {
2182		struct qede_fastpath *fp = &edev->fp_array[i];
2183		dma_addr_t p_phys_table;
2184		u32 page_cnt;
2185
2186		if (fp->type & QEDE_FASTPATH_RX) {
2187			struct qed_rxq_start_ret_params ret_params;
2188			struct qede_rx_queue *rxq = fp->rxq;
2189			__le16 *val;
2190
2191			memset(&ret_params, 0, sizeof(ret_params));
2192			memset(&q_params, 0, sizeof(q_params));
2193			q_params.queue_id = rxq->rxq_id;
2194			q_params.vport_id = 0;
2195			q_params.p_sb = fp->sb_info;
2196			q_params.sb_idx = RX_PI;
2197
2198			p_phys_table =
2199			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
2200			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
2201
2202			rc = edev->ops->q_rx_start(cdev, i, &q_params,
2203						   rxq->rx_buf_size,
2204						   rxq->rx_bd_ring.p_phys_addr,
2205						   p_phys_table,
2206						   page_cnt, &ret_params);
2207			if (rc) {
2208				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
2209				       rc);
2210				goto out;
2211			}
2212
2213			/* Use the return parameters */
2214			rxq->hw_rxq_prod_addr = ret_params.p_prod;
2215			rxq->handle = ret_params.p_handle;
2216
2217			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
2218			rxq->hw_cons_ptr = val;
2219
2220			qede_update_rx_prod(edev, rxq);
2221		}
2222
2223		if (fp->type & QEDE_FASTPATH_XDP) {
2224			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2225			if (rc)
2226				goto out;
2227
2228			bpf_prog_add(edev->xdp_prog, 1);
2229			fp->rxq->xdp_prog = edev->xdp_prog;
2230		}
2231
2232		if (fp->type & QEDE_FASTPATH_TX) {
2233			int cos;
2234
2235			for_each_cos_in_txq(edev, cos) {
2236				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
2237						    TX_PI(cos));
2238				if (rc)
2239					goto out;
2240			}
2241		}
2242	}
2243
2244	/* Prepare and send the vport enable */
2245	vport_update_params->vport_id = start.vport_id;
2246	vport_update_params->update_vport_active_flg = 1;
2247	vport_update_params->vport_active_flg = 1;
2248
2249	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2250	    qed_info->tx_switching) {
2251		vport_update_params->update_tx_switching_flg = 1;
2252		vport_update_params->tx_switching_flg = 1;
2253	}
2254
2255	qede_fill_rss_params(edev, &vport_update_params->rss_params,
2256			     &vport_update_params->update_rss_flg);
2257
2258	rc = edev->ops->vport_update(cdev, vport_update_params);
2259	if (rc)
2260		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2261
2262out:
2263	vfree(vport_update_params);
2264	return rc;
2265}
2266
2267enum qede_unload_mode {
2268	QEDE_UNLOAD_NORMAL,
2269	QEDE_UNLOAD_RECOVERY,
2270};
2271
2272static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
2273			bool is_locked)
2274{
2275	struct qed_link_params link_params;
2276	int rc;
2277
2278	DP_INFO(edev, "Starting qede unload\n");
2279
2280	if (!is_locked)
2281		__qede_lock(edev);
2282
2283	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2284
2285	if (mode != QEDE_UNLOAD_RECOVERY)
2286		edev->state = QEDE_STATE_CLOSED;
2287
2288	qede_rdma_dev_event_close(edev);
2289
2290	/* Close OS Tx */
2291	netif_tx_disable(edev->ndev);
2292	netif_carrier_off(edev->ndev);
2293
2294	if (mode != QEDE_UNLOAD_RECOVERY) {
2295		/* Reset the link */
2296		memset(&link_params, 0, sizeof(link_params));
2297		link_params.link_up = false;
2298		edev->ops->common->set_link(edev->cdev, &link_params);
2299
2300		rc = qede_stop_queues(edev);
2301		if (rc) {
2302			qede_sync_free_irqs(edev);
2303			goto out;
2304		}
2305
2306		DP_INFO(edev, "Stopped Queues\n");
2307	}
2308
2309	qede_vlan_mark_nonconfigured(edev);
2310	edev->ops->fastpath_stop(edev->cdev);
2311
2312	if (edev->dev_info.common.b_arfs_capable) {
2313		qede_poll_for_freeing_arfs_filters(edev);
2314		qede_free_arfs(edev);
2315	}
2316
2317	/* Release the interrupts */
2318	qede_sync_free_irqs(edev);
2319	edev->ops->common->set_fp_int(edev->cdev, 0);
2320
2321	qede_napi_disable_remove(edev);
2322
2323	if (mode == QEDE_UNLOAD_RECOVERY)
2324		qede_empty_tx_queues(edev);
2325
2326	qede_free_mem_load(edev);
2327	qede_free_fp_array(edev);
2328
2329out:
2330	if (!is_locked)
2331		__qede_unlock(edev);
2332
2333	if (mode != QEDE_UNLOAD_RECOVERY)
2334		DP_NOTICE(edev, "Link is down\n");
2335
2336	edev->ptp_skip_txts = 0;
2337
2338	DP_INFO(edev, "Ending qede unload\n");
2339}
2340
2341enum qede_load_mode {
2342	QEDE_LOAD_NORMAL,
2343	QEDE_LOAD_RELOAD,
2344	QEDE_LOAD_RECOVERY,
2345};
2346
2347static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2348		     bool is_locked)
2349{
2350	struct qed_link_params link_params;
2351	struct ethtool_coalesce coal = {};
2352	u8 num_tc;
2353	int rc, i;
2354
2355	DP_INFO(edev, "Starting qede load\n");
2356
2357	if (!is_locked)
2358		__qede_lock(edev);
2359
2360	rc = qede_set_num_queues(edev);
2361	if (rc)
2362		goto out;
2363
2364	rc = qede_alloc_fp_array(edev);
2365	if (rc)
2366		goto out;
2367
2368	qede_init_fp(edev);
2369
2370	rc = qede_alloc_mem_load(edev);
2371	if (rc)
2372		goto err1;
2373	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2374		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2375
2376	rc = qede_set_real_num_queues(edev);
2377	if (rc)
2378		goto err2;
2379
2380	if (qede_alloc_arfs(edev)) {
2381		edev->ndev->features &= ~NETIF_F_NTUPLE;
2382		edev->dev_info.common.b_arfs_capable = false;
2383	}
2384
2385	qede_napi_add_enable(edev);
2386	DP_INFO(edev, "Napi added and enabled\n");
2387
2388	rc = qede_setup_irqs(edev);
2389	if (rc)
2390		goto err3;
2391	DP_INFO(edev, "Setup IRQs succeeded\n");
2392
2393	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2394	if (rc)
2395		goto err4;
2396	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2397
2398	num_tc = netdev_get_num_tc(edev->ndev);
2399	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
2400	qede_setup_tc(edev->ndev, num_tc);
2401
2402	/* Program un-configured VLANs */
2403	qede_configure_vlan_filters(edev);
2404
2405	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2406
2407	/* Ask for link-up using current configuration */
2408	memset(&link_params, 0, sizeof(link_params));
2409	link_params.link_up = true;
2410	edev->ops->common->set_link(edev->cdev, &link_params);
2411
2412	edev->state = QEDE_STATE_OPEN;
2413
2414	coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
2415	coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
2416
2417	for_each_queue(i) {
2418		if (edev->coal_entry[i].isvalid) {
2419			coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
2420			coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
2421		}
2422		__qede_unlock(edev);
2423		qede_set_per_coalesce(edev->ndev, i, &coal);
2424		__qede_lock(edev);
2425	}
 2426	DP_INFO(edev, "Ending qede load successfully\n");
2427
2428	goto out;
2429err4:
2430	qede_sync_free_irqs(edev);
2431err3:
2432	qede_napi_disable_remove(edev);
2433err2:
2434	qede_free_mem_load(edev);
2435err1:
2436	edev->ops->common->set_fp_int(edev->cdev, 0);
2437	qede_free_fp_array(edev);
2438	edev->num_queues = 0;
2439	edev->fp_num_tx = 0;
2440	edev->fp_num_rx = 0;
2441out:
2442	if (!is_locked)
2443		__qede_unlock(edev);
2444
2445	return rc;
2446}
2447
 2448/* 'func' should be able to run between unload and reload if the interface
 2449 * is actually running, or afterwards in case it's currently DOWN.
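 *
 * Typical usage (illustrative): an MTU or ring-size change passes a
 * qede_reload_args whose 'func' applies the new value between unload and
 * reload, so it takes effect on the subsequent load.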
2450 */
2451void qede_reload(struct qede_dev *edev,
2452		 struct qede_reload_args *args, bool is_locked)
2453{
2454	if (!is_locked)
2455		__qede_lock(edev);
2456
 2457	/* Since qede_lock is held, the internal state can't change even
 2458	 * if the netdev state starts transitioning. Check whether the current
 2459	 * internal configuration indicates the device is up, then reload.
 2460	 */
2461	if (edev->state == QEDE_STATE_OPEN) {
2462		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2463		if (args)
2464			args->func(edev, args);
2465		qede_load(edev, QEDE_LOAD_RELOAD, true);
2466
2467		/* Since no one is going to do it for us, re-configure */
2468		qede_config_rx_mode(edev->ndev);
2469	} else if (args) {
2470		args->func(edev, args);
2471	}
2472
2473	if (!is_locked)
2474		__qede_unlock(edev);
2475}
2476
2477/* called with rtnl_lock */
2478static int qede_open(struct net_device *ndev)
2479{
2480	struct qede_dev *edev = netdev_priv(ndev);
2481	int rc;
2482
2483	netif_carrier_off(ndev);
2484
2485	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2486
2487	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2488	if (rc)
2489		return rc;
2490
2491	udp_tunnel_nic_reset_ntf(ndev);
2492
2493	edev->ops->common->update_drv_state(edev->cdev, true);
2494
2495	return 0;
2496}
2497
2498static int qede_close(struct net_device *ndev)
2499{
2500	struct qede_dev *edev = netdev_priv(ndev);
2501
2502	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2503
2504	if (edev->cdev)
2505		edev->ops->common->update_drv_state(edev->cdev, false);
2506
2507	return 0;
2508}
2509
2510static void qede_link_update(void *dev, struct qed_link_output *link)
2511{
2512	struct qede_dev *edev = dev;
2513
2514	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
2515		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
2516		return;
2517	}
2518
2519	if (link->link_up) {
2520		if (!netif_carrier_ok(edev->ndev)) {
2521			DP_NOTICE(edev, "Link is up\n");
2522			netif_tx_start_all_queues(edev->ndev);
2523			netif_carrier_on(edev->ndev);
2524			qede_rdma_dev_event_open(edev);
2525		}
2526	} else {
2527		if (netif_carrier_ok(edev->ndev)) {
2528			DP_NOTICE(edev, "Link is down\n");
2529			netif_tx_disable(edev->ndev);
2530			netif_carrier_off(edev->ndev);
2531			qede_rdma_dev_event_close(edev);
2532		}
2533	}
2534}
2535
2536static void qede_schedule_recovery_handler(void *dev)
2537{
2538	struct qede_dev *edev = dev;
2539
2540	if (edev->state == QEDE_STATE_RECOVERY) {
2541		DP_NOTICE(edev,
 2542			  "Avoid scheduling a recovery handler since already in recovery state\n");
2543		return;
2544	}
2545
2546	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
2547	schedule_delayed_work(&edev->sp_task, 0);
2548
2549	DP_INFO(edev, "Scheduled a recovery handler\n");
2550}
2551
2552static void qede_recovery_failed(struct qede_dev *edev)
2553{
2554	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
2555
2556	netif_device_detach(edev->ndev);
2557
2558	if (edev->cdev)
2559		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
2560}
2561
2562static void qede_recovery_handler(struct qede_dev *edev)
2563{
2564	u32 curr_state = edev->state;
2565	int rc;
2566
2567	DP_NOTICE(edev, "Starting a recovery process\n");
2568
 2569	/* No need to acquire the qede_lock first since qede_sp_task does so
2570	 * before calling this function.
2571	 */
2572	edev->state = QEDE_STATE_RECOVERY;
2573
2574	edev->ops->common->recovery_prolog(edev->cdev);
2575
2576	if (curr_state == QEDE_STATE_OPEN)
2577		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
2578
2579	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
2580
2581	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
2582			  IS_VF(edev), QEDE_PROBE_RECOVERY);
2583	if (rc) {
2584		edev->cdev = NULL;
2585		goto err;
2586	}
2587
2588	if (curr_state == QEDE_STATE_OPEN) {
2589		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
2590		if (rc)
2591			goto err;
2592
2593		qede_config_rx_mode(edev->ndev);
2594		udp_tunnel_nic_reset_ntf(edev->ndev);
2595	}
2596
2597	edev->state = curr_state;
2598
2599	DP_NOTICE(edev, "Recovery handling is done\n");
2600
2601	return;
2602
2603err:
2604	qede_recovery_failed(edev);
2605}
2606
2607static void qede_atomic_hw_err_handler(struct qede_dev *edev)
2608{
2609	struct qed_dev *cdev = edev->cdev;
2610
2611	DP_NOTICE(edev,
2612		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
2613		  edev->err_flags);
2614
2615	/* Get a call trace of the flow that led to the error */
2616	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
2617
2618	/* Prevent HW attentions from being reasserted */
2619	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
2620		edev->ops->common->attn_clr_enable(cdev, true);
2621
2622	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
2623}
2624
2625static void qede_generic_hw_err_handler(struct qede_dev *edev)
2626{
2627	DP_NOTICE(edev,
2628		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
2629		  edev->err_flags);
2630
2631	if (edev->devlink)
2632		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
2633
2634	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2635
2636	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
2637}
2638
2639static void qede_set_hw_err_flags(struct qede_dev *edev,
2640				  enum qed_hw_err_type err_type)
2641{
2642	unsigned long err_flags = 0;
2643
2644	switch (err_type) {
2645	case QED_HW_ERR_DMAE_FAIL:
2646		set_bit(QEDE_ERR_WARN, &err_flags);
2647		fallthrough;
2648	case QED_HW_ERR_MFW_RESP_FAIL:
2649	case QED_HW_ERR_HW_ATTN:
2650	case QED_HW_ERR_RAMROD_FAIL:
2651	case QED_HW_ERR_FW_ASSERT:
2652		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
2653		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
2654		break;
2655
2656	default:
2657		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
2658		break;
2659	}
2660
2661	edev->err_flags |= err_flags;
2662}
2663
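/**
 * qede_schedule_hw_err_handler - entry point for HW error notifications
 * @dev: qede device, passed as an opaque void * by the callback interface
 * @err_type: the reported qed HW error
 *
 * Performs the atomic part of the handling inline and defers the sleepable
 * part to the slowpath task. Fan failure is deliberately never filtered
 * out, since it must be acted upon even while another error or a recovery
 * is in progress.
 */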
2664static void qede_schedule_hw_err_handler(void *dev,
2665					 enum qed_hw_err_type err_type)
2666{
2667	struct qede_dev *edev = dev;
2668
2669	/* Fan failure cannot be masked by handling of another HW error or by a
2670	 * concurrent recovery process.
2671	 */
2672	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
2673	     edev->state == QEDE_STATE_RECOVERY) &&
2674	     err_type != QED_HW_ERR_FAN_FAIL) {
2675		DP_INFO(edev,
2676			"Avoid scheduling error handling while another HW error is being handled\n");
2677		return;
2678	}
2679
2680	if (err_type >= QED_HW_ERR_LAST) {
2681		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
2682		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2683		return;
2684	}
2685
2686	edev->last_err_type = err_type;
2687	qede_set_hw_err_flags(edev, err_type);
2688	qede_atomic_hw_err_handler(edev);
2689	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
2690	schedule_delayed_work(&edev->sp_task, 0);
2691
2692	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
2693}
2694
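/* Helper for the MFW TLV flow below: a Tx queue is considered full once the
 * stack has stopped it.
 */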
2695static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
2696{
2697	struct netdev_queue *netdev_txq;
2698
2699	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
2700	if (netif_xmit_stopped(netdev_txq))
2701		return true;
2702
2703	return false;
2704}
2705
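/**
 * qede_get_generic_tlv_data - fill generic TLV data for the management FW
 * @dev: qede device, passed as an opaque void * by the callback interface
 * @data: generic TLVs to fill
 *
 * Reports the checksum/LSO feature flags and up to QED_TLV_MAC_COUNT MAC
 * addresses (the primary address plus the first unicast addresses).
 */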
2706static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
2707{
2708	struct qede_dev *edev = dev;
2709	struct netdev_hw_addr *ha;
2710	int i;
2711
2712	if (edev->ndev->features & NETIF_F_IP_CSUM)
2713		data->feat_flags |= QED_TLV_IP_CSUM;
2714	if (edev->ndev->features & NETIF_F_TSO)
2715		data->feat_flags |= QED_TLV_LSO;
2716
2717	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
2718	eth_zero_addr(data->mac[1]);
2719	eth_zero_addr(data->mac[2]);
2720	/* Copy the first two UC MACs */
2721	netif_addr_lock_bh(edev->ndev);
2722	i = 1;
2723	netdev_for_each_uc_addr(ha, edev->ndev) {
2724		ether_addr_copy(data->mac[i++], ha->addr);
2725		if (i == QED_TLV_MAC_COUNT)
2726			break;
2727	}
2728
2729	netif_addr_unlock_bh(edev->ndev);
2730}
2731
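/**
 * qede_get_eth_tlv_data - fill Ethernet TLV data for the management FW
 * @dev: qede device, passed as an opaque void * by the callback interface
 * @data: struct qed_mfw_tlv_eth to fill
 *
 * Reports LSO limits, promiscuity, queue counts and queue fullness. Queue
 * state is sampled under the qede lock so it cannot change mid-report.
 */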
2732static void qede_get_eth_tlv_data(void *dev, void *data)
2733{
2734	struct qed_mfw_tlv_eth *etlv = data;
2735	struct qede_dev *edev = dev;
2736	struct qede_fastpath *fp;
2737	int i;
2738
2739	etlv->lso_maxoff_size = 0xFFFF;
2740	etlv->lso_maxoff_size_set = true;
2741	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
2742	etlv->lso_minseg_size_set = true;
2743	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
2744	etlv->prom_mode_set = true;
2745	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
2746	etlv->tx_descr_size_set = true;
2747	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
2748	etlv->rx_descr_size_set = true;
2749	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
2750	etlv->iov_offload_set = true;
2751
2752	/* Fill information regarding queues; this should be done under the
2753	 * qede lock to guarantee the queues don't change beneath our feet.
2754	 */
2755	etlv->txqs_empty = true;
2756	etlv->rxqs_empty = true;
2757	etlv->num_txqs_full = 0;
2758	etlv->num_rxqs_full = 0;
2759
2760	__qede_lock(edev);
2761	for_each_queue(i) {
2762		fp = &edev->fp_array[i];
2763		if (fp->type & QEDE_FASTPATH_TX) {
2764			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
2765
2766			if (txq->sw_tx_cons != txq->sw_tx_prod)
2767				etlv->txqs_empty = false;
2768			if (qede_is_txq_full(edev, txq))
2769				etlv->num_txqs_full++;
2770		}
2771		if (fp->type & QEDE_FASTPATH_RX) {
2772			if (qede_has_rx_work(fp->rxq))
2773				etlv->rxqs_empty = false;
2774
2775			/* This one is a bit tricky; the firmware might stop
2776			 * placing packets even if the ring is not yet full,
2777			 * so give an approximation.
2778			 */
2779			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
2780			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
2781			    RX_RING_SIZE - 100)
2782				etlv->num_rxqs_full++;
2783		}
2784	}
2785	__qede_unlock(edev);
2786
2787	etlv->txqs_empty_set = true;
2788	etlv->rxqs_empty_set = true;
2789	etlv->num_txqs_full_set = true;
2790	etlv->num_rxqs_full_set = true;
2791}
2792
2793/**
2794 * qede_io_error_detected - called when a PCI error is detected
2795 * @pdev: Pointer to PCI device
2796 * @state: The current PCI connection state
2797 *
2798 * This function is called after a PCI bus error affecting
2799 * this device has been detected.
2800 */
2801static pci_ers_result_t
2802qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2803{
2804	struct net_device *dev = pci_get_drvdata(pdev);
2805	struct qede_dev *edev = netdev_priv(dev);
2806
2807	if (!edev)
2808		return PCI_ERS_RESULT_NONE;
2809
2810	DP_NOTICE(edev, "IO error detected [%d]\n", state);
2811
2812	__qede_lock(edev);
2813	if (edev->state == QEDE_STATE_RECOVERY) {
2814		DP_NOTICE(edev, "Device already in the recovery state\n");
2815		__qede_unlock(edev);
2816		return PCI_ERS_RESULT_NONE;
2817	}
2818
2819	/* PF handles the recovery of its VFs */
2820	if (IS_VF(edev)) {
2821		DP_VERBOSE(edev, QED_MSG_IOV,
2822			   "VF recovery is handled by its PF\n");
2823		__qede_unlock(edev);
2824		return PCI_ERS_RESULT_RECOVERED;
2825	}
2826
2827	/* Close OS Tx */
2828	netif_tx_disable(edev->ndev);
2829	netif_carrier_off(edev->ndev);
2830
2831	set_bit(QEDE_SP_AER, &edev->sp_flags);
2832	schedule_delayed_work(&edev->sp_task, 0);
2833
2834	__qede_unlock(edev);
2835
2836	return PCI_ERS_RESULT_CAN_RECOVER;
2837}