   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/stddef.h>
   8#include <linux/pci.h>
   9#include <linux/kernel.h>
  10#include <linux/slab.h>
  11#include <linux/delay.h>
  12#include <asm/byteorder.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/string.h>
  15#include <linux/module.h>
  16#include <linux/interrupt.h>
  17#include <linux/workqueue.h>
  18#include <linux/ethtool.h>
  19#include <linux/etherdevice.h>
  20#include <linux/vmalloc.h>
  21#include <linux/crash_dump.h>
  22#include <linux/crc32.h>
  23#include <linux/qed/qed_if.h>
  24#include <linux/qed/qed_ll2_if.h>
  25#include <net/devlink.h>
  26#include <linux/aer.h>
  27#include <linux/phylink.h>
  28
  29#include "qed.h"
  30#include "qed_sriov.h"
  31#include "qed_sp.h"
  32#include "qed_dev_api.h"
  33#include "qed_ll2.h"
  34#include "qed_fcoe.h"
  35#include "qed_iscsi.h"
  36
  37#include "qed_mcp.h"
  38#include "qed_reg_addr.h"
  39#include "qed_hw.h"
  40#include "qed_selftest.h"
  41#include "qed_debug.h"
  42#include "qed_devlink.h"
  43
  44#define QED_ROCE_QPS			(8192)
  45#define QED_ROCE_DPIS			(8)
  46#define QED_RDMA_SRQS                   QED_ROCE_QPS
  47#define QED_NVM_CFG_GET_FLAGS		0xA
  48#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
  49#define QED_NVM_CFG_MAX_ATTRS		50
  50
  51static char version[] =
  52	"QLogic FastLinQ 4xxxx Core Module qed\n";
  53
  54MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
  55MODULE_LICENSE("GPL");
  56
  57#define FW_FILE_VERSION				\
  58	__stringify(FW_MAJOR_VERSION) "."	\
  59	__stringify(FW_MINOR_VERSION) "."	\
  60	__stringify(FW_REVISION_VERSION) "."	\
  61	__stringify(FW_ENGINEERING_VERSION)
  62
  63#define QED_FW_FILE_NAME	\
  64	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
  65
  66MODULE_FIRMWARE(QED_FW_FILE_NAME);
  67
  68/* MFW speed capabilities maps */
  69
  70struct qed_mfw_speed_map {
  71	u32		mfw_val;
  72	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
  73
  74	const u32	*cap_arr;
  75	u32		arr_size;
  76};
  77
  78#define QED_MFW_SPEED_MAP(type, arr)		\
  79{						\
  80	.mfw_val	= (type),		\
  81	.cap_arr	= (arr),		\
  82	.arr_size	= ARRAY_SIZE(arr),	\
  83}
  84
  85static const u32 qed_mfw_ext_1g[] __initconst = {
  86	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
  87	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
  88	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
  89};
  90
  91static const u32 qed_mfw_ext_10g[] __initconst = {
  92	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
  93	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
  94	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
  95	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
  96	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
  97	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
  98	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
  99	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
 100};
 101
 102static const u32 qed_mfw_ext_25g[] __initconst = {
 103	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
 104	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
 105	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
 106};
 107
 108static const u32 qed_mfw_ext_40g[] __initconst = {
 109	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
 110	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
 111	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
 112	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
 113};
 114
 115static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
 116	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
 117	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
 118	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
 119	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
 120	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
 121};
 122
 123static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
 124	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
 125	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
 126	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
 127};
 128
 129static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
 130	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
 131	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
 132	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
 133	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
 134	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
 135};
 136
 137static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
 138	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
 139	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
 140	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
 141	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
 142};
 143
 144static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
 145	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
 146	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
 147	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
 148	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
 149	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
 150			  qed_mfw_ext_50g_base_r),
 151	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
 152			  qed_mfw_ext_50g_base_r2),
 153	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
 154			  qed_mfw_ext_100g_base_r2),
 155	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
 156			  qed_mfw_ext_100g_base_r4),
 157};
 158
 159static const u32 qed_mfw_legacy_1g[] __initconst = {
 160	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
 161	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
 162	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
 163};
 164
 165static const u32 qed_mfw_legacy_10g[] __initconst = {
 166	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
 167	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
 168	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
 169	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
 170	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
 171	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
 172	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
 173	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
 174};
 175
 176static const u32 qed_mfw_legacy_20g[] __initconst = {
 177	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
 178};
 179
 180static const u32 qed_mfw_legacy_25g[] __initconst = {
 181	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
 182	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
 183	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
 184};
 185
 186static const u32 qed_mfw_legacy_40g[] __initconst = {
 187	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
 188	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
 189	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
 190	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
 191};
 192
 193static const u32 qed_mfw_legacy_50g[] __initconst = {
 194	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
 195	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
 196	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
 197};
 198
 199static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
 200	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
 201	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
 202	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
 203	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
 204};
 205
 206static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
 207	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
 208			  qed_mfw_legacy_1g),
 209	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
 210			  qed_mfw_legacy_10g),
 211	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
 212			  qed_mfw_legacy_20g),
 213	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
 214			  qed_mfw_legacy_25g),
 215	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
 216			  qed_mfw_legacy_40g),
 217	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
 218			  qed_mfw_legacy_50g),
 219	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
 220			  qed_mfw_legacy_bb_100g),
 221};
 222
 223static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
 224{
 225	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
 226
 227	map->cap_arr = NULL;
 228	map->arr_size = 0;
 229}
 230
 231static void __init qed_mfw_speed_maps_init(void)
 232{
 233	u32 i;
 234
 235	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
 236		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);
 237
 238	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
 239		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
 240}
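/* The __initconst capability arrays above live in init memory and are
 * discarded once module init completes, so qed_mfw_speed_maps_init() must be
 * called from qed_init() to fold them into the persistent linkmode masks
 * first. After population only 'caps' and 'mfw_val' remain meaningful; they
 * are later matched against user-requested speeds, e.g. in qed_set_link():
 *
 *	if (linkmode_intersects(params->adv_speeds, map->caps))
 *		speed->advertised_speeds |= map->mfw_val;
 */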
 241
 242static int __init qed_init(void)
 243{
 244	pr_info("%s", version);
 245
 246	qed_mfw_speed_maps_init();
 247
 248	return 0;
 249}
 250module_init(qed_init);
 251
 252static void __exit qed_exit(void)
 253{
 254	/* To prevent marking this module as "permanent" */
 255}
 256module_exit(qed_exit);
 257
 258static void qed_free_pci(struct qed_dev *cdev)
 259{
 260	struct pci_dev *pdev = cdev->pdev;
 261
 262	pci_disable_pcie_error_reporting(pdev);
 263
 264	if (cdev->doorbells && cdev->db_size)
 265		iounmap(cdev->doorbells);
 266	if (cdev->regview)
 267		iounmap(cdev->regview);
 268	if (atomic_read(&pdev->enable_cnt) == 1)
 269		pci_release_regions(pdev);
 270
 271	pci_disable_device(pdev);
 272}
 273
 274#define PCI_REVISION_ID_ERROR_VAL	0xff
 275
 276/* Performs PCI initializations as well as initializing PCI-related parameters
  277 * in the device structure. Returns 0 in case of success.
 278 */
 279static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
 280{
 281	u8 rev_id;
 282	int rc;
 283
 284	cdev->pdev = pdev;
 285
 286	rc = pci_enable_device(pdev);
 287	if (rc) {
 288		DP_NOTICE(cdev, "Cannot enable PCI device\n");
 289		goto err0;
 290	}
 291
 292	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 293		DP_NOTICE(cdev, "No memory region found in bar #0\n");
 294		rc = -EIO;
 295		goto err1;
 296	}
 297
 298	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 299		DP_NOTICE(cdev, "No memory region found in bar #2\n");
 300		rc = -EIO;
 301		goto err1;
 302	}
 303
 304	if (atomic_read(&pdev->enable_cnt) == 1) {
 305		rc = pci_request_regions(pdev, "qed");
 306		if (rc) {
 307			DP_NOTICE(cdev,
 308				  "Failed to request PCI memory resources\n");
 309			goto err1;
 310		}
 311		pci_set_master(pdev);
 312		pci_save_state(pdev);
 313	}
 314
 315	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
 316	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
 317		DP_NOTICE(cdev,
 318			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
 319			  rev_id);
 320		rc = -ENODEV;
 321		goto err2;
 322	}
 323	if (!pci_is_pcie(pdev)) {
 324		DP_NOTICE(cdev, "The bus is not PCI Express\n");
 325		rc = -EIO;
 326		goto err2;
 327	}
 328
 329	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 330	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
 331		DP_NOTICE(cdev, "Cannot find power management capability\n");
 332
 333	rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
 334	if (rc) {
 335		DP_NOTICE(cdev, "Can't request DMA addresses\n");
 336		rc = -EIO;
 337		goto err2;
 338	}
 339
 340	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
 341	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
 342	cdev->pci_params.irq = pdev->irq;
 343
 344	cdev->regview = pci_ioremap_bar(pdev, 0);
 345	if (!cdev->regview) {
 346		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
 347		rc = -ENOMEM;
 348		goto err2;
 349	}
 350
 351	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
 352	cdev->db_size = pci_resource_len(cdev->pdev, 2);
 353	if (!cdev->db_size) {
 354		if (IS_PF(cdev)) {
 355			DP_NOTICE(cdev, "No Doorbell bar available\n");
 356			return -EINVAL;
 357		} else {
 358			return 0;
 359		}
 360	}
 361
 362	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
 363
 364	if (!cdev->doorbells) {
 365		DP_NOTICE(cdev, "Cannot map doorbell space\n");
 366		return -ENOMEM;
 367	}
 368
 369	/* AER (Advanced Error reporting) configuration */
 370	rc = pci_enable_pcie_error_reporting(pdev);
 371	if (rc)
 372		DP_VERBOSE(cdev, NETIF_MSG_DRV,
 373			   "Failed to configure PCIe AER [%d]\n", rc);
 374
 375	return 0;
 376
 377err2:
 378	pci_release_regions(pdev);
 379err1:
 380	pci_disable_device(pdev);
 381err0:
 382	return rc;
 383}
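/* BAR usage established by qed_init_pci(): BAR 0 backs the register window
 * (cdev->regview) and BAR 2 backs the doorbell space. A PF must have a
 * doorbell BAR, while a VF may legitimately lack one, in which case db_size
 * stays zero and qed_free_pci() skips the doorbell unmap.
 */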
 384
 385int qed_fill_dev_info(struct qed_dev *cdev,
 386		      struct qed_dev_info *dev_info)
 387{
 388	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 389	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
 390	struct qed_tunnel_info *tun = &cdev->tunnel;
 391	struct qed_ptt  *ptt;
 392
 393	memset(dev_info, 0, sizeof(struct qed_dev_info));
 394
 395	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
 396	    tun->vxlan.b_mode_enabled)
 397		dev_info->vxlan_enable = true;
 398
 399	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
 400	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
 401	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
 402		dev_info->gre_enable = true;
 403
 404	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
 405	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
 406	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
 407		dev_info->geneve_enable = true;
 408
 409	dev_info->num_hwfns = cdev->num_hwfns;
 410	dev_info->pci_mem_start = cdev->pci_params.mem_start;
 411	dev_info->pci_mem_end = cdev->pci_params.mem_end;
 412	dev_info->pci_irq = cdev->pci_params.irq;
 413	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
 414	dev_info->dev_type = cdev->type;
 415	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
 416
 417	if (IS_PF(cdev)) {
 418		dev_info->fw_major = FW_MAJOR_VERSION;
 419		dev_info->fw_minor = FW_MINOR_VERSION;
 420		dev_info->fw_rev = FW_REVISION_VERSION;
 421		dev_info->fw_eng = FW_ENGINEERING_VERSION;
 422		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
 423						       &cdev->mf_bits);
 424		if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
 425			dev_info->b_arfs_capable = true;
 426		dev_info->tx_switching = true;
 427
 428		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
 429			dev_info->wol_support = true;
 430
 431		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
 432		dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
 433		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
 434	} else {
 435		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
 436				      &dev_info->fw_minor, &dev_info->fw_rev,
 437				      &dev_info->fw_eng);
 438	}
 439
 440	if (IS_PF(cdev)) {
 441		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
 442		if (ptt) {
 443			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
 444					    &dev_info->mfw_rev, NULL);
 445
 446			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
 447					    &dev_info->mbi_version);
 448
 449			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
 450					       &dev_info->flash_size);
 451
 452			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
 453		}
 454	} else {
 455		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
 456				    &dev_info->mfw_rev, NULL);
 457	}
 458
 459	dev_info->mtu = hw_info->mtu;
 460	cdev->common_dev_info = *dev_info;
 461
 462	return 0;
 463}
 464
 465static void qed_free_cdev(struct qed_dev *cdev)
 466{
 467	kfree((void *)cdev);
 468}
 469
 470static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
 471{
 472	struct qed_dev *cdev;
 473
 474	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
 475	if (!cdev)
 476		return cdev;
 477
 478	qed_init_struct(cdev);
 479
 480	return cdev;
 481}
 482
 483/* Sets the requested power state */
 484static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
 485{
 486	if (!cdev)
 487		return -ENODEV;
 488
 489	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
 490	return 0;
 491}
 492
 493/* probing */
 494static struct qed_dev *qed_probe(struct pci_dev *pdev,
 495				 struct qed_probe_params *params)
 496{
 497	struct qed_dev *cdev;
 498	int rc;
 499
 500	cdev = qed_alloc_cdev(pdev);
 501	if (!cdev)
 502		goto err0;
 503
 504	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
 505	cdev->protocol = params->protocol;
 506
 507	if (params->is_vf)
 508		cdev->b_is_vf = true;
 509
 510	qed_init_dp(cdev, params->dp_module, params->dp_level);
 511
 512	cdev->recov_in_prog = params->recov_in_prog;
 513
 514	rc = qed_init_pci(cdev, pdev);
 515	if (rc) {
 516		DP_ERR(cdev, "init pci failed\n");
 517		goto err1;
 518	}
 519	DP_INFO(cdev, "PCI init completed successfully\n");
 520
 521	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
 522	if (rc) {
 523		DP_ERR(cdev, "hw prepare failed\n");
 524		goto err2;
 525	}
 526
 527	DP_INFO(cdev, "%s completed successfully\n", __func__);
 528
 529	return cdev;
 530
 531err2:
 532	qed_free_pci(cdev);
 533err1:
 534	qed_free_cdev(cdev);
 535err0:
 536	return NULL;
 537}
 538
 539static void qed_remove(struct qed_dev *cdev)
 540{
 541	if (!cdev)
 542		return;
 543
 544	qed_hw_remove(cdev);
 545
 546	qed_free_pci(cdev);
 547
 548	qed_set_power_state(cdev, PCI_D3hot);
 549
 550	qed_free_cdev(cdev);
 551}
 552
 553static void qed_disable_msix(struct qed_dev *cdev)
 554{
 555	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 556		pci_disable_msix(cdev->pdev);
 557		kfree(cdev->int_params.msix_table);
 558	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
 559		pci_disable_msi(cdev->pdev);
 560	}
 561
 562	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
 563}
 564
 565static int qed_enable_msix(struct qed_dev *cdev,
 566			   struct qed_int_params *int_params)
 567{
 568	int i, rc, cnt;
 569
 570	cnt = int_params->in.num_vectors;
 571
 572	for (i = 0; i < cnt; i++)
 573		int_params->msix_table[i].entry = i;
 574
 575	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
 576				   int_params->in.min_msix_cnt, cnt);
 577	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
 578	    (rc % cdev->num_hwfns)) {
 579		pci_disable_msix(cdev->pdev);
 580
 581		/* If fastpath is initialized, we need at least one interrupt
  582		 * per hwfn [and the slowpath interrupts]. The new requested
  583		 * number should be a multiple of the number of hwfns.
 584		 */
 585		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
 586		DP_NOTICE(cdev,
 587			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
 588			  cnt, int_params->in.num_vectors);
 589		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
 590					   cnt);
 591		if (!rc)
 592			rc = cnt;
 593	}
 594
  595	/* For VFs, we must return an error if we did not get the exact
  596	 * number of MSI-X vectors that we requested.
  597	 * Not doing so will lead to a crash when starting queues for
  598	 * this VF.
  599	 */
 600	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
  601		/* MSI-X configuration was achieved */
 602		int_params->out.int_mode = QED_INT_MODE_MSIX;
 603		int_params->out.num_vectors = rc;
 604		rc = 0;
 605	} else {
 606		DP_NOTICE(cdev,
 607			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
 608			  cnt, rc);
 609	}
 610
 611	return rc;
 612}
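/* Example of the rounding above on a CMT (two-hwfn) device: if 16 vectors
 * are requested but pci_enable_msix_range() grants only 9, 9 is not a
 * multiple of the hwfn count, so the count is rounded down to
 * (9 / 2) * 2 = 8 and the allocation is retried with
 * pci_enable_msix_exact(). A PF then accepts any positive result, while a
 * VF only succeeds when the exact count was granted.
 */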
 613
  614/* This function outputs the int mode and the number of enabled MSI-X vectors */
 615static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
 616{
 617	struct qed_int_params *int_params = &cdev->int_params;
 618	struct msix_entry *tbl;
 619	int rc = 0, cnt;
 620
 621	switch (int_params->in.int_mode) {
 622	case QED_INT_MODE_MSIX:
 623		/* Allocate MSIX table */
 624		cnt = int_params->in.num_vectors;
 625		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
 626		if (!int_params->msix_table) {
 627			rc = -ENOMEM;
 628			goto out;
 629		}
 630
 631		/* Enable MSIX */
 632		rc = qed_enable_msix(cdev, int_params);
 633		if (!rc)
 634			goto out;
 635
 636		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
 637		kfree(int_params->msix_table);
 638		if (force_mode)
 639			goto out;
 640		fallthrough;
 641
 642	case QED_INT_MODE_MSI:
 643		if (cdev->num_hwfns == 1) {
 644			rc = pci_enable_msi(cdev->pdev);
 645			if (!rc) {
 646				int_params->out.int_mode = QED_INT_MODE_MSI;
 647				goto out;
 648			}
 649
 650			DP_NOTICE(cdev, "Failed to enable MSI\n");
 651			if (force_mode)
 652				goto out;
 653		}
 654		fallthrough;
 655
 656	case QED_INT_MODE_INTA:
  657		int_params->out.int_mode = QED_INT_MODE_INTA;
  658		rc = 0;
  659		goto out;
 660	default:
 661		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
 662			  int_params->in.int_mode);
 663		rc = -EINVAL;
 664	}
 665
 666out:
 667	if (!rc)
 668		DP_INFO(cdev, "Using %s interrupts\n",
 669			int_params->out.int_mode == QED_INT_MODE_INTA ?
 670			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
 671			"MSI" : "MSIX");
 672	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
 673
 674	return rc;
 675}
 676
 677static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
 678				    int index, void(*handler)(void *))
 679{
 680	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
 681	int relative_idx = index / cdev->num_hwfns;
 682
 683	hwfn->simd_proto_handler[relative_idx].func = handler;
 684	hwfn->simd_proto_handler[relative_idx].token = token;
 685}
 686
 687static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
 688{
 689	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
 690	int relative_idx = index / cdev->num_hwfns;
 691
 692	memset(&hwfn->simd_proto_handler[relative_idx], 0,
 693	       sizeof(struct qed_simd_fp_handler));
 694}
 695
 696static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
 697{
 698	tasklet_schedule((struct tasklet_struct *)tasklet);
 699	return IRQ_HANDLED;
 700}
 701
 702static irqreturn_t qed_single_int(int irq, void *dev_instance)
 703{
 704	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
 705	struct qed_hwfn *hwfn;
 706	irqreturn_t rc = IRQ_NONE;
 707	u64 status;
 708	int i, j;
 709
 710	for (i = 0; i < cdev->num_hwfns; i++) {
 711		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
 712
 713		if (!status)
 714			continue;
 715
 716		hwfn = &cdev->hwfns[i];
 717
 718		/* Slowpath interrupt */
 719		if (unlikely(status & 0x1)) {
 720			tasklet_schedule(&hwfn->sp_dpc);
 721			status &= ~0x1;
 722			rc = IRQ_HANDLED;
 723		}
 724
 725		/* Fastpath interrupts */
 726		for (j = 0; j < 64; j++) {
 727			if ((0x2ULL << j) & status) {
 728				struct qed_simd_fp_handler *p_handler =
 729					&hwfn->simd_proto_handler[j];
 730
 731				if (p_handler->func)
 732					p_handler->func(p_handler->token);
 733				else
 734					DP_NOTICE(hwfn,
 735						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
 736						  j, status);
 737
 738				status &= ~(0x2ULL << j);
 739				rc = IRQ_HANDLED;
 740			}
 741		}
 742
 743		if (unlikely(status))
 744			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
 745				   "got an unknown interrupt status 0x%llx\n",
 746				   status);
 747	}
 748
 749	return rc;
 750}
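/* Layout of the single-ISR status word read above: bit 0 schedules the
 * slowpath tasklet, and each bit (0x2ULL << j) dispatches the fastpath
 * handler registered at simd_proto_handler[j] via qed_simd_handler_config().
 * On CMT devices the global handler index is split as (index % num_hwfns)
 * for the engine and (index / num_hwfns) for the per-engine slot, mirroring
 * the L2 status-block distribution in qed_sb_init().
 */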
 751
 752int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 753{
 754	struct qed_dev *cdev = hwfn->cdev;
 755	u32 int_mode;
 756	int rc = 0;
 757	u8 id;
 758
 759	int_mode = cdev->int_params.out.int_mode;
 760	if (int_mode == QED_INT_MODE_MSIX) {
 761		id = hwfn->my_id;
 762		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
 763			 id, cdev->pdev->bus->number,
 764			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
 765		rc = request_irq(cdev->int_params.msix_table[id].vector,
 766				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
 767	} else {
 768		unsigned long flags = 0;
 769
 770		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
 771			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
 772			 PCI_FUNC(cdev->pdev->devfn));
 773
 774		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
 775			flags |= IRQF_SHARED;
 776
 777		rc = request_irq(cdev->pdev->irq, qed_single_int,
 778				 flags, cdev->name, cdev);
 779	}
 780
 781	if (rc)
 782		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
 783	else
 784		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
 785			   "Requested slowpath %s\n",
 786			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
 787
 788	return rc;
 789}
 790
 791static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
 792{
 793	/* Calling the disable function will make sure that any
 794	 * currently-running function is completed. The following call to the
 795	 * enable function makes this sequence a flush-like operation.
 796	 */
 797	if (p_hwfn->b_sp_dpc_enabled) {
 798		tasklet_disable(&p_hwfn->sp_dpc);
 799		tasklet_enable(&p_hwfn->sp_dpc);
 800	}
 801}
 802
 803void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
 804{
 805	struct qed_dev *cdev = p_hwfn->cdev;
 806	u8 id = p_hwfn->my_id;
 807	u32 int_mode;
 808
 809	int_mode = cdev->int_params.out.int_mode;
 810	if (int_mode == QED_INT_MODE_MSIX)
 811		synchronize_irq(cdev->int_params.msix_table[id].vector);
 812	else
 813		synchronize_irq(cdev->pdev->irq);
 814
 815	qed_slowpath_tasklet_flush(p_hwfn);
 816}
 817
 818static void qed_slowpath_irq_free(struct qed_dev *cdev)
 819{
 820	int i;
 821
 822	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 823		for_each_hwfn(cdev, i) {
 824			if (!cdev->hwfns[i].b_int_requested)
 825				break;
 826			free_irq(cdev->int_params.msix_table[i].vector,
 827				 &cdev->hwfns[i].sp_dpc);
 828		}
 829	} else {
 830		if (QED_LEADING_HWFN(cdev)->b_int_requested)
 831			free_irq(cdev->pdev->irq, cdev);
 832	}
 833	qed_int_disable_post_isr_release(cdev);
 834}
 835
 836static int qed_nic_stop(struct qed_dev *cdev)
 837{
 838	int i, rc;
 839
 840	rc = qed_hw_stop(cdev);
 841
 842	for (i = 0; i < cdev->num_hwfns; i++) {
 843		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 844
 845		if (p_hwfn->b_sp_dpc_enabled) {
 846			tasklet_disable(&p_hwfn->sp_dpc);
 847			p_hwfn->b_sp_dpc_enabled = false;
 848			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
 849				   "Disabled sp tasklet [hwfn %d] at %p\n",
 850				   i, &p_hwfn->sp_dpc);
 851		}
 852	}
 853
 854	qed_dbg_pf_exit(cdev);
 855
 856	return rc;
 857}
 858
 859static int qed_nic_setup(struct qed_dev *cdev)
 860{
 861	int rc, i;
 862
 863	/* Determine if interface is going to require LL2 */
 864	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
 865		for (i = 0; i < cdev->num_hwfns; i++) {
 866			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 867
 868			p_hwfn->using_ll2 = true;
 869		}
 870	}
 871
 872	rc = qed_resc_alloc(cdev);
 873	if (rc)
 874		return rc;
 875
 876	DP_INFO(cdev, "Allocated qed resources\n");
 877
 878	qed_resc_setup(cdev);
 879
 880	return rc;
 881}
 882
 883static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
 884{
 885	int limit = 0;
 886
 887	/* Mark the fastpath as free/used */
 888	cdev->int_params.fp_initialized = cnt ? true : false;
 889
 890	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
 891		limit = cdev->num_hwfns * 63;
 892	else if (cdev->int_params.fp_msix_cnt)
 893		limit = cdev->int_params.fp_msix_cnt;
 894
 895	if (!limit)
 896		return -ENOMEM;
 897
 898	return min_t(int, cnt, limit);
 899}
 900
 901static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
 902{
 903	memset(info, 0, sizeof(struct qed_int_info));
 904
 905	if (!cdev->int_params.fp_initialized) {
 906		DP_INFO(cdev,
 907			"Protocol driver requested interrupt information, but its support is not yet configured\n");
 908		return -EINVAL;
 909	}
 910
 911	/* Need to expose only MSI-X information; Single IRQ is handled solely
 912	 * by qed.
 913	 */
 914	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 915		int msix_base = cdev->int_params.fp_msix_base;
 916
 917		info->msix_cnt = cdev->int_params.fp_msix_cnt;
 918		info->msix = &cdev->int_params.msix_table[msix_base];
 919	}
 920
 921	return 0;
 922}
 923
 924static int qed_slowpath_setup_int(struct qed_dev *cdev,
 925				  enum qed_int_mode int_mode)
 926{
 927	struct qed_sb_cnt_info sb_cnt_info;
 928	int num_l2_queues = 0;
 929	int rc;
 930	int i;
 931
 932	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
 933		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
 934		return -EINVAL;
 935	}
 936
 937	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
 938	cdev->int_params.in.int_mode = int_mode;
 939	for_each_hwfn(cdev, i) {
 940		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
 941		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
 942		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
 943		cdev->int_params.in.num_vectors++; /* slowpath */
 944	}
 945
 946	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
 947	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 948
 949	if (is_kdump_kernel()) {
 950		DP_INFO(cdev,
 951			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
 952			cdev->int_params.in.min_msix_cnt);
 953		cdev->int_params.in.num_vectors =
 954			cdev->int_params.in.min_msix_cnt;
 955	}
 956
 957	rc = qed_set_int_mode(cdev, false);
 958	if (rc)  {
 959		DP_ERR(cdev, "%s ERR\n", __func__);
 960		return rc;
 961	}
 962
 963	cdev->int_params.fp_msix_base = cdev->num_hwfns;
 964	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
 965				       cdev->num_hwfns;
 966
 967	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
 968	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
 969		return 0;
 970
 971	for_each_hwfn(cdev, i)
 972		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
 973
 974	DP_VERBOSE(cdev, QED_MSG_RDMA,
 975		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
 976		   cdev->int_params.fp_msix_cnt, num_l2_queues);
 977
 978	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
 979		cdev->int_params.rdma_msix_cnt =
 980			(cdev->int_params.fp_msix_cnt - num_l2_queues)
 981			/ cdev->num_hwfns;
 982		cdev->int_params.rdma_msix_base =
 983			cdev->int_params.fp_msix_base + num_l2_queues;
 984		cdev->int_params.fp_msix_cnt = num_l2_queues;
 985	} else {
 986		cdev->int_params.rdma_msix_cnt = 0;
 987	}
 988
 989	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
 990		   cdev->int_params.rdma_msix_cnt,
 991		   cdev->int_params.rdma_msix_base);
 992
 993	return 0;
 994}
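/* Worked example of the fastpath/RDMA vector split above: with two hwfns and
 * 32 MSI-X vectors granted, fp_msix_base = 2 and fp_msix_cnt = 30. If the
 * hwfns expose 16 L2 queues in total, the remainder goes to RDMA:
 * rdma_msix_cnt = (30 - 16) / 2 = 7 per hwfn, rdma_msix_base = 2 + 16 = 18,
 * and fp_msix_cnt is trimmed back to 16 for L2 use.
 */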
 995
 996static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
 997{
 998	int rc;
 999
1000	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
1001	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
1002
1003	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
1004			    &cdev->int_params.in.num_vectors);
1005	if (cdev->num_hwfns > 1) {
1006		u8 vectors = 0;
1007
1008		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
1009		cdev->int_params.in.num_vectors += vectors;
1010	}
1011
1012	/* We want a minimum of one fastpath vector per vf hwfn */
1013	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
1014
1015	rc = qed_set_int_mode(cdev, true);
1016	if (rc)
1017		return rc;
1018
1019	cdev->int_params.fp_msix_base = 0;
1020	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
1021
1022	return 0;
1023}
1024
1025u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
1026		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
1027{
1028	int rc;
1029
1030	p_hwfn->stream->next_in = input_buf;
1031	p_hwfn->stream->avail_in = input_len;
1032	p_hwfn->stream->next_out = unzip_buf;
1033	p_hwfn->stream->avail_out = max_size;
1034
1035	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
1036
1037	if (rc != Z_OK) {
1038		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
1039			   rc);
1040		return 0;
1041	}
1042
1043	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
1044	zlib_inflateEnd(p_hwfn->stream);
1045
1046	if (rc != Z_OK && rc != Z_STREAM_END) {
1047		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
1048			   p_hwfn->stream->msg, rc);
1049		return 0;
1050	}
1051
1052	return p_hwfn->stream->total_out / 4;
1053}
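/* qed_unzip_data() returns the number of dwords (total_out / 4) written to
 * unzip_buf, or 0 on any zlib failure, so callers must treat 0 as an error.
 * Illustrative sketch with hypothetical buffer names (not taken from this
 * file):
 *
 *	dword_cnt = qed_unzip_data(p_hwfn, comp_len, comp_buf,
 *				   out_len, out_buf);
 *	if (!dword_cnt)
 *		return -EINVAL;
 */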
1054
1055static int qed_alloc_stream_mem(struct qed_dev *cdev)
1056{
1057	int i;
1058	void *workspace;
1059
1060	for_each_hwfn(cdev, i) {
1061		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1062
1063		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
1064		if (!p_hwfn->stream)
1065			return -ENOMEM;
1066
1067		workspace = vzalloc(zlib_inflate_workspacesize());
1068		if (!workspace)
1069			return -ENOMEM;
1070		p_hwfn->stream->workspace = workspace;
1071	}
1072
1073	return 0;
1074}
1075
1076static void qed_free_stream_mem(struct qed_dev *cdev)
1077{
1078	int i;
1079
1080	for_each_hwfn(cdev, i) {
1081		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1082
1083		if (!p_hwfn->stream)
1084			return;
1085
1086		vfree(p_hwfn->stream->workspace);
1087		kfree(p_hwfn->stream);
1088	}
1089}
1090
1091static void qed_update_pf_params(struct qed_dev *cdev,
1092				 struct qed_pf_params *params)
1093{
1094	int i;
1095
1096	if (IS_ENABLED(CONFIG_QED_RDMA)) {
1097		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
1098		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
1099		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
 1100		/* Divide the MRs by 3 to avoid MF ILT overflow */
1101		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
1102	}
1103
1104	if (cdev->num_hwfns > 1 || IS_VF(cdev))
1105		params->eth_pf_params.num_arfs_filters = 0;
1106
1107	/* In case we might support RDMA, don't allow qede to be greedy
1108	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
1109	 * per hwfn.
1110	 */
1111	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
1112		u16 *num_cons;
1113
1114		num_cons = &params->eth_pf_params.num_cons;
1115		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
1116	}
1117
1118	for (i = 0; i < cdev->num_hwfns; i++) {
1119		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1120
1121		p_hwfn->pf_params = *params;
1122	}
1123}
1124
1125#define QED_PERIODIC_DB_REC_COUNT		10
1126#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
1127#define QED_PERIODIC_DB_REC_INTERVAL \
1128	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
1129
1130static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
1131				     enum qed_slowpath_wq_flag wq_flag,
1132				     unsigned long delay)
1133{
1134	if (!hwfn->slowpath_wq_active)
1135		return -EINVAL;
1136
1137	/* Memory barrier for setting atomic bit */
1138	smp_mb__before_atomic();
1139	set_bit(wq_flag, &hwfn->slowpath_task_flags);
1140	/* Memory barrier after setting atomic bit */
1141	smp_mb__after_atomic();
1142	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
1143
1144	return 0;
1145}
1146
1147void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
1148{
1149	/* Reset periodic Doorbell Recovery counter */
1150	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
1151
1152	/* Don't schedule periodic Doorbell Recovery if already scheduled */
1153	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1154		     &p_hwfn->slowpath_task_flags))
1155		return;
1156
1157	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
1158				  QED_PERIODIC_DB_REC_INTERVAL);
1159}
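/* Doorbell-recovery pacing: qed_periodic_db_rec_start() arms a delayed work
 * item that fires every QED_PERIODIC_DB_REC_INTERVAL_MS (100 ms). Each run of
 * qed_slowpath_task() invokes qed_db_rec_handler() and re-arms itself while
 * periodic_db_rec_count (initially 10) is still counting down, so a single
 * start request yields roughly one second of periodic recovery attempts.
 */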
1160
1161static void qed_slowpath_wq_stop(struct qed_dev *cdev)
1162{
1163	int i;
1164
1165	if (IS_VF(cdev))
1166		return;
1167
1168	for_each_hwfn(cdev, i) {
1169		if (!cdev->hwfns[i].slowpath_wq)
1170			continue;
1171
1172		/* Stop queuing new delayed works */
1173		cdev->hwfns[i].slowpath_wq_active = false;
1174
1175		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
1176		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
1177	}
1178}
1179
1180static void qed_slowpath_task(struct work_struct *work)
1181{
1182	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
1183					     slowpath_task.work);
1184	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1185
1186	if (!ptt) {
1187		if (hwfn->slowpath_wq_active)
1188			queue_delayed_work(hwfn->slowpath_wq,
1189					   &hwfn->slowpath_task, 0);
1190
1191		return;
1192	}
1193
1194	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
1195			       &hwfn->slowpath_task_flags))
1196		qed_mfw_process_tlv_req(hwfn, ptt);
1197
1198	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1199			       &hwfn->slowpath_task_flags)) {
1200		/* skip qed_db_rec_handler during recovery/unload */
1201		if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
1202			goto out;
1203
1204		qed_db_rec_handler(hwfn, ptt);
1205		if (hwfn->periodic_db_rec_count--)
1206			qed_slowpath_delayed_work(hwfn,
1207						  QED_SLOWPATH_PERIODIC_DB_REC,
1208						  QED_PERIODIC_DB_REC_INTERVAL);
1209	}
1210
1211out:
1212	qed_ptt_release(hwfn, ptt);
1213}
1214
1215static int qed_slowpath_wq_start(struct qed_dev *cdev)
1216{
1217	struct qed_hwfn *hwfn;
1218	char name[NAME_SIZE];
1219	int i;
1220
1221	if (IS_VF(cdev))
1222		return 0;
1223
1224	for_each_hwfn(cdev, i) {
1225		hwfn = &cdev->hwfns[i];
1226
1227		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
1228			 cdev->pdev->bus->number,
1229			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
1230
1231		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
1232		if (!hwfn->slowpath_wq) {
1233			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
1234			return -ENOMEM;
1235		}
1236
1237		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
1238		hwfn->slowpath_wq_active = true;
1239	}
1240
1241	return 0;
1242}
1243
1244static int qed_slowpath_start(struct qed_dev *cdev,
1245			      struct qed_slowpath_params *params)
1246{
1247	struct qed_drv_load_params drv_load_params;
1248	struct qed_hw_init_params hw_init_params;
1249	struct qed_mcp_drv_version drv_version;
1250	struct qed_tunnel_info tunn_info;
1251	const u8 *data = NULL;
1252	struct qed_hwfn *hwfn;
1253	struct qed_ptt *p_ptt;
1254	int rc = -EINVAL;
1255
1256	if (qed_iov_wq_start(cdev))
1257		goto err;
1258
1259	if (qed_slowpath_wq_start(cdev))
1260		goto err;
1261
1262	if (IS_PF(cdev)) {
1263		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
1264				      &cdev->pdev->dev);
1265		if (rc) {
1266			DP_NOTICE(cdev,
1267				  "Failed to find fw file - /lib/firmware/%s\n",
1268				  QED_FW_FILE_NAME);
1269			goto err;
1270		}
1271
1272		if (cdev->num_hwfns == 1) {
1273			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1274			if (p_ptt) {
1275				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
1276			} else {
1277				DP_NOTICE(cdev,
1278					  "Failed to acquire PTT for aRFS\n");
1279				rc = -EINVAL;
1280				goto err;
1281			}
1282		}
1283	}
1284
1285	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
1286	rc = qed_nic_setup(cdev);
1287	if (rc)
1288		goto err;
1289
1290	if (IS_PF(cdev))
1291		rc = qed_slowpath_setup_int(cdev, params->int_mode);
1292	else
1293		rc = qed_slowpath_vf_setup_int(cdev);
1294	if (rc)
1295		goto err1;
1296
1297	if (IS_PF(cdev)) {
1298		/* Allocate stream for unzipping */
1299		rc = qed_alloc_stream_mem(cdev);
1300		if (rc)
1301			goto err2;
1302
1303		/* First Dword used to differentiate between various sources */
1304		data = cdev->firmware->data + sizeof(u32);
1305
1306		qed_dbg_pf_init(cdev);
1307	}
1308
1309	/* Start the slowpath */
1310	memset(&hw_init_params, 0, sizeof(hw_init_params));
1311	memset(&tunn_info, 0, sizeof(tunn_info));
1312	tunn_info.vxlan.b_mode_enabled = true;
1313	tunn_info.l2_gre.b_mode_enabled = true;
1314	tunn_info.ip_gre.b_mode_enabled = true;
1315	tunn_info.l2_geneve.b_mode_enabled = true;
1316	tunn_info.ip_geneve.b_mode_enabled = true;
1317	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1318	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1319	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1320	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1321	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
1322	hw_init_params.p_tunn = &tunn_info;
1323	hw_init_params.b_hw_start = true;
1324	hw_init_params.int_mode = cdev->int_params.out.int_mode;
1325	hw_init_params.allow_npar_tx_switch = true;
1326	hw_init_params.bin_fw_data = data;
1327
1328	memset(&drv_load_params, 0, sizeof(drv_load_params));
1329	drv_load_params.is_crash_kernel = is_kdump_kernel();
1330	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
1331	drv_load_params.avoid_eng_reset = false;
1332	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
1333	hw_init_params.p_drv_load_params = &drv_load_params;
1334
1335	rc = qed_hw_init(cdev, &hw_init_params);
1336	if (rc)
1337		goto err2;
1338
1339	DP_INFO(cdev,
1340		"HW initialization and function start completed successfully\n");
1341
1342	if (IS_PF(cdev)) {
1343		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
1344					   BIT(QED_MODE_L2GENEVE_TUNN) |
1345					   BIT(QED_MODE_IPGENEVE_TUNN) |
1346					   BIT(QED_MODE_L2GRE_TUNN) |
1347					   BIT(QED_MODE_IPGRE_TUNN));
1348	}
1349
1350	/* Allocate LL2 interface if needed */
1351	if (QED_LEADING_HWFN(cdev)->using_ll2) {
1352		rc = qed_ll2_alloc_if(cdev);
1353		if (rc)
1354			goto err3;
1355	}
1356	if (IS_PF(cdev)) {
1357		hwfn = QED_LEADING_HWFN(cdev);
1358		drv_version.version = (params->drv_major << 24) |
1359				      (params->drv_minor << 16) |
1360				      (params->drv_rev << 8) |
1361				      (params->drv_eng);
1362		strscpy(drv_version.name, params->name,
1363			MCP_DRV_VER_STR_SIZE - 4);
1364		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
1365					      &drv_version);
1366		if (rc) {
1367			DP_NOTICE(cdev, "Failed sending drv version command\n");
1368			goto err4;
1369		}
1370	}
1371
1372	qed_reset_vport_stats(cdev);
1373
1374	return 0;
1375
1376err4:
1377	qed_ll2_dealloc_if(cdev);
1378err3:
1379	qed_hw_stop(cdev);
1380err2:
1381	qed_hw_timers_stop_all(cdev);
1382	if (IS_PF(cdev))
1383		qed_slowpath_irq_free(cdev);
1384	qed_free_stream_mem(cdev);
1385	qed_disable_msix(cdev);
1386err1:
1387	qed_resc_free(cdev);
1388err:
1389	if (IS_PF(cdev))
1390		release_firmware(cdev->firmware);
1391
1392	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
1393	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
1394		qed_ptt_release(QED_LEADING_HWFN(cdev),
1395				QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1396
1397	qed_iov_wq_stop(cdev, false);
1398
1399	qed_slowpath_wq_stop(cdev);
1400
1401	return rc;
1402}
1403
1404static int qed_slowpath_stop(struct qed_dev *cdev)
1405{
1406	if (!cdev)
1407		return -ENODEV;
1408
1409	qed_slowpath_wq_stop(cdev);
1410
1411	qed_ll2_dealloc_if(cdev);
1412
1413	if (IS_PF(cdev)) {
1414		if (cdev->num_hwfns == 1)
1415			qed_ptt_release(QED_LEADING_HWFN(cdev),
1416					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
1417		qed_free_stream_mem(cdev);
1418		if (IS_QED_ETH_IF(cdev))
1419			qed_sriov_disable(cdev, true);
1420	}
1421
1422	qed_nic_stop(cdev);
1423
1424	if (IS_PF(cdev))
1425		qed_slowpath_irq_free(cdev);
1426
1427	qed_disable_msix(cdev);
1428
1429	qed_resc_free(cdev);
1430
1431	qed_iov_wq_stop(cdev, true);
1432
1433	if (IS_PF(cdev))
1434		release_firmware(cdev->firmware);
1435
1436	return 0;
1437}
1438
1439static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
1440{
1441	int i;
1442
1443	memcpy(cdev->name, name, NAME_SIZE);
1444	for_each_hwfn(cdev, i)
1445		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1446}
1447
1448static u32 qed_sb_init(struct qed_dev *cdev,
1449		       struct qed_sb_info *sb_info,
1450		       void *sb_virt_addr,
1451		       dma_addr_t sb_phy_addr, u16 sb_id,
1452		       enum qed_sb_type type)
1453{
1454	struct qed_hwfn *p_hwfn;
1455	struct qed_ptt *p_ptt;
1456	u16 rel_sb_id;
1457	u32 rc;
1458
1459	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1460	if (type == QED_SB_TYPE_L2_QUEUE) {
1461		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1462		rel_sb_id = sb_id / cdev->num_hwfns;
1463	} else {
1464		p_hwfn = QED_AFFIN_HWFN(cdev);
1465		rel_sb_id = sb_id;
1466	}
1467
1468	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1469		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1470		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1471
1472	if (IS_PF(p_hwfn->cdev)) {
1473		p_ptt = qed_ptt_acquire(p_hwfn);
1474		if (!p_ptt)
1475			return -EBUSY;
1476
1477		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
1478				     sb_phy_addr, rel_sb_id);
1479		qed_ptt_release(p_hwfn, p_ptt);
1480	} else {
1481		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
1482				     sb_phy_addr, rel_sb_id);
1483	}
1484
1485	return rc;
1486}
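/* Status-block placement example for the CMT case above: with two hwfns, an
 * L2 queue with sb_id 5 lands on hwfn 1 (5 % 2) as relative SB 2 (5 / 2),
 * while storage/RoCE status blocks always go to the affinity hwfn with an
 * unmodified index.
 */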
1487
1488static u32 qed_sb_release(struct qed_dev *cdev,
1489			  struct qed_sb_info *sb_info,
1490			  u16 sb_id,
1491			  enum qed_sb_type type)
1492{
1493	struct qed_hwfn *p_hwfn;
1494	u16 rel_sb_id;
1495	u32 rc;
1496
1497	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1498	if (type == QED_SB_TYPE_L2_QUEUE) {
1499		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1500		rel_sb_id = sb_id / cdev->num_hwfns;
1501	} else {
1502		p_hwfn = QED_AFFIN_HWFN(cdev);
1503		rel_sb_id = sb_id;
1504	}
1505
1506	DP_VERBOSE(cdev, NETIF_MSG_INTR,
 1507		   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
1508		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1509
1510	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1511
1512	return rc;
1513}
1514
1515static bool qed_can_link_change(struct qed_dev *cdev)
1516{
1517	return true;
1518}
1519
1520static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
1521				     const struct qed_link_params *params)
1522{
1523	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
1524	const struct qed_mfw_speed_map *map;
1525	u32 i;
1526
1527	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1528		ext_speed->autoneg = !!params->autoneg;
1529
1530	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1531		ext_speed->advertised_speeds = 0;
1532
1533		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
1534			map = qed_mfw_ext_maps + i;
1535
1536			if (linkmode_intersects(params->adv_speeds, map->caps))
1537				ext_speed->advertised_speeds |= map->mfw_val;
1538		}
1539	}
1540
1541	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
1542		switch (params->forced_speed) {
1543		case SPEED_1000:
1544			ext_speed->forced_speed = QED_EXT_SPEED_1G;
1545			break;
1546		case SPEED_10000:
1547			ext_speed->forced_speed = QED_EXT_SPEED_10G;
1548			break;
1549		case SPEED_20000:
1550			ext_speed->forced_speed = QED_EXT_SPEED_20G;
1551			break;
1552		case SPEED_25000:
1553			ext_speed->forced_speed = QED_EXT_SPEED_25G;
1554			break;
1555		case SPEED_40000:
1556			ext_speed->forced_speed = QED_EXT_SPEED_40G;
1557			break;
1558		case SPEED_50000:
1559			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
1560						  QED_EXT_SPEED_50G_R2;
1561			break;
1562		case SPEED_100000:
1563			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
1564						  QED_EXT_SPEED_100G_R4 |
1565						  QED_EXT_SPEED_100G_P4;
1566			break;
1567		default:
1568			break;
1569		}
1570	}
1571
1572	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
1573		return;
1574
1575	switch (params->forced_speed) {
1576	case SPEED_25000:
1577		switch (params->fec) {
1578		case FEC_FORCE_MODE_NONE:
1579			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
1580			break;
1581		case FEC_FORCE_MODE_FIRECODE:
1582			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
1583			break;
1584		case FEC_FORCE_MODE_RS:
1585			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
1586			break;
1587		case FEC_FORCE_MODE_AUTO:
1588			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
1589						    ETH_EXT_FEC_25G_BASE_R |
1590						    ETH_EXT_FEC_25G_NONE;
1591			break;
1592		default:
1593			break;
1594		}
1595
1596		break;
1597	case SPEED_40000:
1598		switch (params->fec) {
1599		case FEC_FORCE_MODE_NONE:
1600			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
1601			break;
1602		case FEC_FORCE_MODE_FIRECODE:
1603			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
1604			break;
1605		case FEC_FORCE_MODE_AUTO:
1606			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
1607						    ETH_EXT_FEC_40G_NONE;
1608			break;
1609		default:
1610			break;
1611		}
1612
1613		break;
1614	case SPEED_50000:
1615		switch (params->fec) {
1616		case FEC_FORCE_MODE_NONE:
1617			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
1618			break;
1619		case FEC_FORCE_MODE_FIRECODE:
1620			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
1621			break;
1622		case FEC_FORCE_MODE_RS:
1623			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
1624			break;
1625		case FEC_FORCE_MODE_AUTO:
1626			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
1627						    ETH_EXT_FEC_50G_BASE_R |
1628						    ETH_EXT_FEC_50G_NONE;
1629			break;
1630		default:
1631			break;
1632		}
1633
1634		break;
1635	case SPEED_100000:
1636		switch (params->fec) {
1637		case FEC_FORCE_MODE_NONE:
1638			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
1639			break;
1640		case FEC_FORCE_MODE_FIRECODE:
1641			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
1642			break;
1643		case FEC_FORCE_MODE_RS:
1644			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
1645			break;
1646		case FEC_FORCE_MODE_AUTO:
1647			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
1648						    ETH_EXT_FEC_100G_BASE_R |
1649						    ETH_EXT_FEC_100G_NONE;
1650			break;
1651		default:
1652			break;
1653		}
1654
1655		break;
1656	default:
1657		break;
1658	}
1659}
1660
1661static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1662{
1663	struct qed_mcp_link_params *link_params;
1664	struct qed_mcp_link_speed_params *speed;
1665	const struct qed_mfw_speed_map *map;
1666	struct qed_hwfn *hwfn;
1667	struct qed_ptt *ptt;
1668	int rc;
1669	u32 i;
1670
1671	if (!cdev)
1672		return -ENODEV;
1673
1674	/* The link should be set only once per PF */
1675	hwfn = &cdev->hwfns[0];
1676
1677	/* When VF wants to set link, force it to read the bulletin instead.
1678	 * This mimics the PF behavior, where a noitification [both immediate
1679	 * and possible later] would be generated when changing properties.
1680	 */
1681	if (IS_VF(cdev)) {
1682		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1683		return 0;
1684	}
1685
1686	ptt = qed_ptt_acquire(hwfn);
1687	if (!ptt)
1688		return -EBUSY;
1689
1690	link_params = qed_mcp_get_link_params(hwfn);
1691	if (!link_params)
1692		return -ENODATA;
1693
1694	speed = &link_params->speed;
1695
1696	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1697		speed->autoneg = !!params->autoneg;
1698
1699	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1700		speed->advertised_speeds = 0;
1701
1702		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
1703			map = qed_mfw_legacy_maps + i;
1704
1705			if (linkmode_intersects(params->adv_speeds, map->caps))
1706				speed->advertised_speeds |= map->mfw_val;
1707		}
1708	}
1709
1710	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1711		speed->forced_speed = params->forced_speed;
1712
1713	if (qed_mcp_is_ext_speed_supported(hwfn))
1714		qed_set_ext_speed_params(link_params, params);
1715
1716	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1717		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1718			link_params->pause.autoneg = true;
1719		else
1720			link_params->pause.autoneg = false;
1721		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1722			link_params->pause.forced_rx = true;
1723		else
1724			link_params->pause.forced_rx = false;
1725		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1726			link_params->pause.forced_tx = true;
1727		else
1728			link_params->pause.forced_tx = false;
1729	}
1730
1731	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1732		switch (params->loopback_mode) {
1733		case QED_LINK_LOOPBACK_INT_PHY:
1734			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1735			break;
1736		case QED_LINK_LOOPBACK_EXT_PHY:
1737			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1738			break;
1739		case QED_LINK_LOOPBACK_EXT:
1740			link_params->loopback_mode = ETH_LOOPBACK_EXT;
1741			break;
1742		case QED_LINK_LOOPBACK_MAC:
1743			link_params->loopback_mode = ETH_LOOPBACK_MAC;
1744			break;
1745		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
1746			link_params->loopback_mode =
1747				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
1748			break;
1749		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
1750			link_params->loopback_mode =
1751				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
1752			break;
1753		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
1754			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
1755			break;
1756		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
1757			link_params->loopback_mode =
1758				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
1759			break;
1760		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
1761			link_params->loopback_mode =
1762				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
1763			break;
1764		default:
1765			link_params->loopback_mode = ETH_LOOPBACK_NONE;
1766			break;
1767		}
1768	}
1769
1770	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1771		memcpy(&link_params->eee, &params->eee,
1772		       sizeof(link_params->eee));
1773
1774	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
1775		link_params->fec = params->fec;
1776
1777	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1778
1779	qed_ptt_release(hwfn, ptt);
1780
1781	return rc;
1782}
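/* Illustrative caller sketch (hypothetical; an L2 driver such as qede would
 * reach qed_set_link() through the exported common ops rather than directly):
 * forcing 25G with RS FEC fills only the override mask and the matching
 * fields:
 *
 *	struct qed_link_params params = {};
 *
 *	params.override_flags = QED_LINK_OVERRIDE_SPEED_FORCED_SPEED |
 *				QED_LINK_OVERRIDE_FEC_CONFIG;
 *	params.forced_speed = SPEED_25000;
 *	params.fec = FEC_FORCE_MODE_RS;
 *	params.link_up = true;
 *	qed_set_link(cdev, &params);
 */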
1783
1784static int qed_get_port_type(u32 media_type)
1785{
1786	int port_type;
1787
1788	switch (media_type) {
1789	case MEDIA_SFPP_10G_FIBER:
1790	case MEDIA_SFP_1G_FIBER:
1791	case MEDIA_XFP_FIBER:
1792	case MEDIA_MODULE_FIBER:
1793		port_type = PORT_FIBRE;
1794		break;
1795	case MEDIA_DA_TWINAX:
1796		port_type = PORT_DA;
1797		break;
1798	case MEDIA_BASE_T:
1799		port_type = PORT_TP;
1800		break;
1801	case MEDIA_KR:
1802	case MEDIA_NOT_PRESENT:
1803		port_type = PORT_NONE;
1804		break;
1805	case MEDIA_UNSPECIFIED:
1806	default:
1807		port_type = PORT_OTHER;
1808		break;
1809	}
1810	return port_type;
1811}
1812
1813static int qed_get_link_data(struct qed_hwfn *hwfn,
1814			     struct qed_mcp_link_params *params,
1815			     struct qed_mcp_link_state *link,
1816			     struct qed_mcp_link_capabilities *link_caps)
1817{
1818	void *p;
1819
1820	if (!IS_PF(hwfn->cdev)) {
1821		qed_vf_get_link_params(hwfn, params);
1822		qed_vf_get_link_state(hwfn, link);
1823		qed_vf_get_link_caps(hwfn, link_caps);
1824
1825		return 0;
1826	}
1827
1828	p = qed_mcp_get_link_params(hwfn);
1829	if (!p)
1830		return -ENXIO;
1831	memcpy(params, p, sizeof(*params));
1832
1833	p = qed_mcp_get_link_state(hwfn);
1834	if (!p)
1835		return -ENXIO;
1836	memcpy(link, p, sizeof(*link));
1837
1838	p = qed_mcp_get_link_capabilities(hwfn);
1839	if (!p)
1840		return -ENXIO;
1841	memcpy(link_caps, p, sizeof(*link_caps));
1842
1843	return 0;
1844}
1845
1846static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1847				     struct qed_ptt *ptt, u32 capability,
1848				     unsigned long *if_caps)
1849{
1850	u32 media_type, tcvr_state, tcvr_type;
1851	u32 speed_mask, board_cfg;
1852
1853	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1854		media_type = MEDIA_UNSPECIFIED;
1855
1856	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
1857		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1858
1859	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1860		speed_mask = 0xFFFFFFFF;
1861
1862	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1863		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1864
1865	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1866		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1867		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1868
1869	switch (media_type) {
1870	case MEDIA_DA_TWINAX:
1871		phylink_set(if_caps, FIBRE);
1872
1873		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1874			phylink_set(if_caps, 20000baseKR2_Full);
1875
 1876		/* For DAC media, multiple speed capabilities are supported */
1877		capability |= speed_mask;
1878
1879		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1880			phylink_set(if_caps, 1000baseKX_Full);
1881		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1882			phylink_set(if_caps, 10000baseCR_Full);
1883
1884		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1885			switch (tcvr_type) {
1886			case ETH_TRANSCEIVER_TYPE_40G_CR4:
1887			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
1888			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1889				phylink_set(if_caps, 40000baseCR4_Full);
1890				break;
1891			default:
1892				break;
1893			}
1894
1895		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1896			phylink_set(if_caps, 25000baseCR_Full);
1897		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1898			phylink_set(if_caps, 50000baseCR2_Full);
1899
1900		if (capability &
1901		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1902			switch (tcvr_type) {
1903			case ETH_TRANSCEIVER_TYPE_100G_CR4:
1904			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
1905				phylink_set(if_caps, 100000baseCR4_Full);
1906				break;
1907			default:
1908				break;
1909			}
1910
1911		break;
1912	case MEDIA_BASE_T:
1913		phylink_set(if_caps, TP);
1914
1915		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1916			if (capability &
1917			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1918				phylink_set(if_caps, 1000baseT_Full);
1919			if (capability &
1920			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1921				phylink_set(if_caps, 10000baseT_Full);
1922		}
1923
1924		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1925			phylink_set(if_caps, FIBRE);
1926
1927			switch (tcvr_type) {
1928			case ETH_TRANSCEIVER_TYPE_1000BASET:
1929				phylink_set(if_caps, 1000baseT_Full);
1930				break;
1931			case ETH_TRANSCEIVER_TYPE_10G_BASET:
1932				phylink_set(if_caps, 10000baseT_Full);
1933				break;
1934			default:
1935				break;
1936			}
1937		}
1938
1939		break;
1940	case MEDIA_SFP_1G_FIBER:
1941	case MEDIA_SFPP_10G_FIBER:
1942	case MEDIA_XFP_FIBER:
1943	case MEDIA_MODULE_FIBER:
1944		phylink_set(if_caps, FIBRE);
1945		capability |= speed_mask;
1946
1947		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1948			switch (tcvr_type) {
1949			case ETH_TRANSCEIVER_TYPE_1G_LX:
1950			case ETH_TRANSCEIVER_TYPE_1G_SX:
1951			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1952			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1953				phylink_set(if_caps, 1000baseKX_Full);
1954				break;
1955			default:
1956				break;
1957			}
1958
1959		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1960			switch (tcvr_type) {
1961			case ETH_TRANSCEIVER_TYPE_10G_SR:
1962			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
1963			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
1964			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
1965				phylink_set(if_caps, 10000baseSR_Full);
1966				break;
1967			case ETH_TRANSCEIVER_TYPE_10G_LR:
1968			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
1969			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
1970			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
1971				phylink_set(if_caps, 10000baseLR_Full);
1972				break;
1973			case ETH_TRANSCEIVER_TYPE_10G_LRM:
1974				phylink_set(if_caps, 10000baseLRM_Full);
1975				break;
1976			case ETH_TRANSCEIVER_TYPE_10G_ER:
1977				phylink_set(if_caps, 10000baseR_FEC);
1978				break;
1979			default:
1980				break;
1981			}
1982
1983		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1984			phylink_set(if_caps, 20000baseKR2_Full);
1985
1986		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1987			switch (tcvr_type) {
1988			case ETH_TRANSCEIVER_TYPE_25G_SR:
1989			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
1990				phylink_set(if_caps, 25000baseSR_Full);
1991				break;
1992			default:
1993				break;
1994			}
1995
1996		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1997			switch (tcvr_type) {
1998			case ETH_TRANSCEIVER_TYPE_40G_LR4:
1999			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2000			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2001				phylink_set(if_caps, 40000baseLR4_Full);
2002				break;
2003			case ETH_TRANSCEIVER_TYPE_40G_SR4:
2004			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2005			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2006				phylink_set(if_caps, 40000baseSR4_Full);
2007				break;
2008			default:
2009				break;
2010			}
2011
2012		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2013			phylink_set(if_caps, 50000baseKR2_Full);
2014
2015		if (capability &
2016		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2017			switch (tcvr_type) {
2018			case ETH_TRANSCEIVER_TYPE_100G_SR4:
2019			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2020				phylink_set(if_caps, 100000baseSR4_Full);
2021				break;
2022			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2023				phylink_set(if_caps, 100000baseLR4_ER4_Full);
2024				break;
2025			default:
2026				break;
2027			}
2028
2029		break;
2030	case MEDIA_KR:
2031		phylink_set(if_caps, Backplane);
2032
2033		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
2034			phylink_set(if_caps, 20000baseKR2_Full);
2035		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
2036			phylink_set(if_caps, 1000baseKX_Full);
2037		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
2038			phylink_set(if_caps, 10000baseKR_Full);
2039		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
2040			phylink_set(if_caps, 25000baseKR_Full);
2041		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
2042			phylink_set(if_caps, 40000baseKR4_Full);
2043		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
2044			phylink_set(if_caps, 50000baseKR2_Full);
2045		if (capability &
2046		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
2047			phylink_set(if_caps, 100000baseKR4_Full);
2048
2049		break;
2050	case MEDIA_UNSPECIFIED:
2051	case MEDIA_NOT_PRESENT:
2052	default:
2053		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
2054			   "Unknown media and transceiver type\n");
2055		break;
2056	}
2057}
2058
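/* Translate the link-partner speed capabilities reported by the MFW into
 * the NVM_CFG speed mask format consumed by qed_fill_link_capability().
 */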
2059static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
2060{
2061	*speed_mask = 0;
2062
2063	if (caps &
2064	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
2065		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2066	if (caps & QED_LINK_PARTNER_SPEED_10G)
2067		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2068	if (caps & QED_LINK_PARTNER_SPEED_20G)
2069		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
2070	if (caps & QED_LINK_PARTNER_SPEED_25G)
2071		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2072	if (caps & QED_LINK_PARTNER_SPEED_40G)
2073		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2074	if (caps & QED_LINK_PARTNER_SPEED_50G)
2075		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
2076	if (caps & QED_LINK_PARTNER_SPEED_100G)
2077		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
2078}
2079
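/* Build the qed_link_output reported to the protocol driver: supported,
 * advertised and link-partner capabilities, pause/FEC/EEE configuration
 * and the current speed and port type.
 */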
2080static void qed_fill_link(struct qed_hwfn *hwfn,
2081			  struct qed_ptt *ptt,
2082			  struct qed_link_output *if_link)
2083{
2084	struct qed_mcp_link_capabilities link_caps;
2085	struct qed_mcp_link_params params;
2086	struct qed_mcp_link_state link;
2087	u32 media_type, speed_mask;
2088
2089	memset(if_link, 0, sizeof(*if_link));
2090
2091	/* Prepare source inputs */
2092	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
2093		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
2094		return;
2095	}
2096
2097	/* Set the link parameters to pass to protocol driver */
2098	if (link.link_up)
2099		if_link->link_up = true;
2100
2101	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
2102		if (link_caps.default_ext_autoneg)
2103			phylink_set(if_link->supported_caps, Autoneg);
2104
2105		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2106
2107		if (params.ext_speed.autoneg)
2108			phylink_set(if_link->advertised_caps, Autoneg);
2109		else
2110			phylink_clear(if_link->advertised_caps, Autoneg);
2111
2112		qed_fill_link_capability(hwfn, ptt,
2113					 params.ext_speed.advertised_speeds,
2114					 if_link->advertised_caps);
2115	} else {
2116		if (link_caps.default_speed_autoneg)
2117			phylink_set(if_link->supported_caps, Autoneg);
2118
2119		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
2120
2121		if (params.speed.autoneg)
2122			phylink_set(if_link->advertised_caps, Autoneg);
2123		else
2124			phylink_clear(if_link->advertised_caps, Autoneg);
2125	}
2126
2127	if (params.pause.autoneg ||
2128	    (params.pause.forced_rx && params.pause.forced_tx))
2129		phylink_set(if_link->supported_caps, Asym_Pause);
2130	if (params.pause.autoneg || params.pause.forced_rx ||
2131	    params.pause.forced_tx)
2132		phylink_set(if_link->supported_caps, Pause);
2133
2134	if_link->sup_fec = link_caps.fec_default;
2135	if_link->active_fec = params.fec;
2136
2137	/* Fill link advertised capability */
2138	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
2139				 if_link->advertised_caps);
2140
2141	/* Fill link supported capability */
2142	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
2143				 if_link->supported_caps);
2144
2145	/* Fill partner advertised capability */
2146	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
2147	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
2148
2149	if (link.link_up)
2150		if_link->speed = link.speed;
2151
2152	/* TODO - fill duplex properly */
2153	if_link->duplex = DUPLEX_FULL;
2154	qed_mcp_get_media_type(hwfn, ptt, &media_type);
2155	if_link->port = qed_get_port_type(media_type);
2156
2157	if_link->autoneg = params.speed.autoneg;
2158
2159	if (params.pause.autoneg)
2160		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
2161	if (params.pause.forced_rx)
2162		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
2163	if (params.pause.forced_tx)
2164		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
2165
2166	if (link.an_complete)
2167		phylink_set(if_link->lp_caps, Autoneg);
2168	if (link.partner_adv_pause)
2169		phylink_set(if_link->lp_caps, Pause);
2170	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
2171	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
2172		phylink_set(if_link->lp_caps, Asym_Pause);
2173
2174	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
2175		if_link->eee_supported = false;
2176	} else {
2177		if_link->eee_supported = true;
2178		if_link->eee_active = link.eee_active;
2179		if_link->sup_caps = link_caps.eee_speed_caps;
2180		/* MFW clears adv_caps on eee disable; use configured value */
2181		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
2182					params.eee.adv_caps;
2183		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
2184		if_link->eee.enable = params.eee.enable;
2185		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
2186		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
2187	}
2188}
2189
2190static void qed_get_current_link(struct qed_dev *cdev,
2191				 struct qed_link_output *if_link)
2192{
2193	struct qed_hwfn *hwfn;
2194	struct qed_ptt *ptt;
2195	int i;
2196
2197	hwfn = &cdev->hwfns[0];
2198	if (IS_PF(cdev)) {
2199		ptt = qed_ptt_acquire(hwfn);
2200		if (ptt) {
2201			qed_fill_link(hwfn, ptt, if_link);
2202			qed_ptt_release(hwfn, ptt);
2203		} else {
2204			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
2205		}
2206	} else {
2207		qed_fill_link(hwfn, NULL, if_link);
2208	}
2209
2210	for_each_hwfn(cdev, i)
2211		qed_inform_vf_link_state(&cdev->hwfns[i]);
2212}
2213
2214void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2215{
2216	void *cookie = hwfn->cdev->ops_cookie;
2217	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2218	struct qed_link_output if_link;
2219
2220	qed_fill_link(hwfn, ptt, &if_link);
2221	qed_inform_vf_link_state(hwfn);
2222
2223	if (IS_LEAD_HWFN(hwfn) && cookie)
2224		op->link_update(cookie, &if_link);
2225}
2226
2227void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2228{
2229	void *cookie = hwfn->cdev->ops_cookie;
2230	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2231
2232	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
2233		op->bw_update(cookie);
2234}
2235
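/* Request an MCP drain on every hwfn (PF only); any failure aborts the
 * sequence and is returned to the caller.
 */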
2236static int qed_drain(struct qed_dev *cdev)
2237{
2238	struct qed_hwfn *hwfn;
2239	struct qed_ptt *ptt;
2240	int i, rc;
2241
2242	if (IS_VF(cdev))
2243		return 0;
2244
2245	for_each_hwfn(cdev, i) {
2246		hwfn = &cdev->hwfns[i];
2247		ptt = qed_ptt_acquire(hwfn);
2248		if (!ptt) {
2249			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
2250			return -EBUSY;
2251		}
2252		rc = qed_mcp_drain(hwfn, ptt);
2253		qed_ptt_release(hwfn, ptt);
2254		if (rc)
2255			return rc;
2256	}
2257
2258	return 0;
2259}
2260
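/* Read the NVM image into a temporary buffer and compute a big-endian
 * CRC32 over all of it except the trailing 4 CRC bytes.
 */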
2261static int qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
2262					  struct qed_nvm_image_att *nvm_image,
2263					  u32 *crc)
2264{
2265	u8 *buf = NULL;
2266	int rc;
2267
2268	/* Allocate a buffer for holding the nvram image */
2269	buf = kzalloc(nvm_image->length, GFP_KERNEL);
2270	if (!buf)
2271		return -ENOMEM;
2272
2273	/* Read image into buffer */
2274	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
2275			      buf, nvm_image->length);
2276	if (rc) {
2277		DP_ERR(cdev, "Failed reading image from nvm\n");
2278		goto out;
2279	}
2280
2281	/* Convert the buffer into big-endian format (excluding the
2282	 * closing 4 bytes of CRC).
2283	 */
2284	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
2285			  DIV_ROUND_UP(nvm_image->length - 4, 4));
2286
2287	/* Calc CRC for the "actual" image buffer, i.e. not including
2288	 * the last 4 CRC bytes.
2289	 */
2290	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
2291	*crc = (__force u32)cpu_to_be32p(crc);
2292
2293out:
2294	kfree(buf);
2295
2296	return rc;
2297}
2298
2299/* Binary file format -
2300 *     /----------------------------------------------------------------------\
2301 * 0B  |                       0x4 [command index]                            |
2302 * 4B  | image_type     | Options        |  Number of register settings       |
2303 * 8B  |                       Value                                          |
2304 * 12B |                       Mask                                           |
2305 * 16B |                       Offset                                         |
2306 *     \----------------------------------------------------------------------/
2307 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2308 * Options - bit 0: Calculate & Update CRC for image
2309 */
2310static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
2311				      bool *check_resp)
2312{
2313	struct qed_nvm_image_att nvm_image;
2314	struct qed_hwfn *p_hwfn;
2315	bool is_crc = false;
2316	u32 image_type;
2317	int rc = 0, i;
2318	u16 len;
2319
2320	*data += 4;
2321	image_type = **data;
2322	p_hwfn = QED_LEADING_HWFN(cdev);
2323	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2324		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
2325			break;
2326	if (i == p_hwfn->nvm_info.num_images) {
2327		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
2328		       image_type);
2329		return -ENOENT;
2330	}
2331
2332	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2333	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
2334
2335	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2336		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
2337		   **data, image_type, nvm_image.start_addr,
2338		   nvm_image.start_addr + nvm_image.length - 1);
2339	(*data)++;
2340	is_crc = !!(**data & BIT(0));
2341	(*data)++;
2342	len = *((u16 *)*data);
2343	*data += 2;
2344	if (is_crc) {
2345		u32 crc = 0;
2346
2347		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
2348		if (rc) {
2349			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
2350			goto exit;
2351		}
2352
2353		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2354				       (nvm_image.start_addr +
2355					nvm_image.length - 4), (u8 *)&crc, 4);
2356		if (rc)
2357			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
2358			       nvm_image.start_addr + nvm_image.length - 4, rc);
2359		goto exit;
2360	}
2361
2362	/* Iterate over the values for setting */
2363	while (len) {
2364		u32 offset, mask, value, cur_value;
2365		u8 buf[4];
2366
2367		value = *((u32 *)*data);
2368		*data += 4;
2369		mask = *((u32 *)*data);
2370		*data += 4;
2371		offset = *((u32 *)*data);
2372		*data += 4;
2373
2374		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2375				      4);
2376		if (rc) {
2377			DP_ERR(cdev, "Failed reading from %08x\n",
2378			       nvm_image.start_addr + offset);
2379			goto exit;
2380		}
2381
2382		cur_value = le32_to_cpu(*((__le32 *)buf));
2383		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2384			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2385			   nvm_image.start_addr + offset, cur_value,
2386			   (cur_value & ~mask) | (value & mask), value, mask);
2387		value = (value & mask) | (cur_value & ~mask);
2388		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2389				       nvm_image.start_addr + offset,
2390				       (u8 *)&value, 4);
2391		if (rc) {
2392			DP_ERR(cdev, "Failed writing to %08x\n",
2393			       nvm_image.start_addr + offset);
2394			goto exit;
2395		}
2396
2397		len--;
2398	}
2399exit:
2400	return rc;
2401}
2402
2403/* Binary file format -
2404 *     /----------------------------------------------------------------------\
2405 * 0B  |                       0x3 [command index]                            |
2406 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2407 * 8B  | File-type |                   reserved                               |
2408 * 12B |                    Image length in bytes                             |
2409 *     \----------------------------------------------------------------------/
2410 *     Start a new file of the provided type
2411 */
2412static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2413					  const u8 **data, bool *check_resp)
2414{
2415	u32 file_type, file_size = 0;
2416	int rc;
2417
2418	*data += 4;
2419	*check_resp = !!(**data & BIT(0));
2420	*data += 4;
2421	file_type = **data;
2422
2423	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2424		   "About to start a new file of type %02x\n", file_type);
2425	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2426		*data += 4;
2427		file_size = *((u32 *)(*data));
2428	}
2429
2430	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2431			       (u8 *)(&file_size), 4);
2432	*data += 4;
2433
2434	return rc;
2435}
2436
2437/* Binary file format -
2438 *     /----------------------------------------------------------------------\
2439 * 0B  |                       0x2 [command index]                            |
2440 * 4B  |                       Length in bytes                                |
2441 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2442 * 12B |                       Offset in bytes                                |
2443 * 16B |                       Data ...                                       |
2444 *     \----------------------------------------------------------------------/
2445 *     Write data as part of a file that was previously started. Data should be
2446 *     of length equal to that provided in the message
2447 */
2448static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2449					 const u8 **data, bool *check_resp)
2450{
2451	u32 offset, len;
2452	int rc;
2453
2454	*data += 4;
2455	len = *((u32 *)(*data));
2456	*data += 4;
2457	*check_resp = !!(**data & BIT(0));
2458	*data += 4;
2459	offset = *((u32 *)(*data));
2460	*data += 4;
2461
2462	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2463		   "About to write File-data: %08x bytes to offset %08x\n",
2464		   len, offset);
2465
2466	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2467			       (char *)(*data), len);
2468	*data += len;
2469
2470	return rc;
2471}
2472
2473/* Binary file format [General header] -
2474 *     /----------------------------------------------------------------------\
2475 * 0B  |                       QED_NVM_SIGNATURE                              |
2476 * 4B  |                       Length in bytes                                |
2477 * 8B  | Highest command in this batchfile |          Reserved                |
2478 *     \----------------------------------------------------------------------/
2479 */
2480static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2481					const struct firmware *image,
2482					const u8 **data)
2483{
2484	u32 signature, len;
2485
2486	/* Check minimum size */
2487	if (image->size < 12) {
2488		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2489		return -EINVAL;
2490	}
2491
2492	/* Check signature */
2493	signature = *((u32 *)(*data));
2494	if (signature != QED_NVM_SIGNATURE) {
2495		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2496		return -EINVAL;
2497	}
2498
2499	*data += 4;
2500	/* Validate internal size equals the image-size */
2501	len = *((u32 *)(*data));
2502	if (len != image->size) {
2503		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2504		       len, (u32)image->size);
2505		return -EINVAL;
2506	}
2507
2508	*data += 4;
2509	/* Make sure the driver is familiar with all commands needed for this image */
2510	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2511		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2512		       *((u16 *)(*data)));
2513		return -EINVAL;
2514	}
2515
2516	*data += 4;
2517
2518	return 0;
2519}
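/* For illustration only (not part of the driver): the general header
 * validated above could be modelled as a hypothetical
 *
 *	struct qed_nvm_batch_hdr {
 *		u32 signature;
 *		u32 len;
 *		u16 max_cmd;
 *		u16 rsvd;
 *	} __packed;
 *
 * where signature must equal QED_NVM_SIGNATURE, len must match the
 * firmware image size and max_cmd must be below QED_NVM_FLASH_CMD_NVM_MAX,
 * exactly the three checks performed above.
 */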
2520
2521/* Binary file format -
2522 *     /----------------------------------------------------------------------\
2523 * 0B  |                       0x5 [command index]                            |
2524 * 4B  | Number of config attributes     |          Reserved                  |
2525 * 4B  | Config ID                       | Entity ID      | Length            |
2526 * 4B  | Value                                                                |
2527 *     |                                                                      |
2528 *     \----------------------------------------------------------------------/
2529 * There can be several cfg_id-entity_id-Length-Value sets as specified by
2530 * 'Number of config attributes'.
2531 *
2532 * The API parses config attributes from the user-provided buffer and flashes
2533 * them to the respective NVM path using the Management FW interface.
2534 */
2535static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2536{
2537	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2538	u8 entity_id, len, buf[32];
2539	bool need_nvm_init = true;
2540	struct qed_ptt *ptt;
2541	u16 cfg_id, count;
2542	int rc = 0, i;
2543	u32 flags;
2544
2545	ptt = qed_ptt_acquire(hwfn);
2546	if (!ptt)
2547		return -EAGAIN;
2548
2549	/* NVM CFG ID attribute header */
2550	*data += 4;
2551	count = *((u16 *)*data);
2552	*data += 4;
2553
2554	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2555		   "Read config ids: num_attrs = %0d\n", count);
2556	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2557	 * arithmetic operations in the implementation.
2558	 */
2559	for (i = 1; i <= count; i++) {
2560		cfg_id = *((u16 *)*data);
2561		*data += 2;
2562		entity_id = **data;
2563		(*data)++;
2564		len = **data;
2565		(*data)++;
2566		memcpy(buf, *data, len);
2567		*data += len;
2568
2569		flags = 0;
2570		if (need_nvm_init) {
2571			flags |= QED_NVM_CFG_OPTION_INIT;
2572			need_nvm_init = false;
2573		}
2574
2575		/* Commit to flash and free the resources */
2576		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2577			flags |= QED_NVM_CFG_OPTION_COMMIT |
2578				 QED_NVM_CFG_OPTION_FREE;
2579			need_nvm_init = true;
2580		}
2581
2582		if (entity_id)
2583			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2584
2585		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2586			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
2587			   entity_id, len);
2588		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2589					 buf, len);
2590		if (rc) {
2591			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2592			break;
2593		}
2594	}
2595
2596	qed_ptt_release(hwfn, ptt);
2597
2598	return rc;
2599}
2600
2601#define QED_MAX_NVM_BUF_LEN	32
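/* Return the length (in bytes) of an NVM config attribute as reported by
 * the MFW, falling back to QED_MAX_NVM_BUF_LEN on any failure.
 */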
2602static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2603{
2604	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2605	u8 buf[QED_MAX_NVM_BUF_LEN];
2606	struct qed_ptt *ptt;
2607	u32 len;
2608	int rc;
2609
2610	ptt = qed_ptt_acquire(hwfn);
2611	if (!ptt)
2612		return QED_MAX_NVM_BUF_LEN;
2613
2614	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2615				 &len);
2616	if (rc || !len) {
2617		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2618		len = QED_MAX_NVM_BUF_LEN;
2619	}
2620
2621	qed_ptt_release(hwfn, ptt);
2622
2623	return len;
2624}
2625
2626static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2627				  u32 cmd, u32 entity_id)
2628{
2629	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2630	struct qed_ptt *ptt;
2631	u32 flags, len;
2632	int rc = 0;
2633
2634	ptt = qed_ptt_acquire(hwfn);
2635	if (!ptt)
2636		return -EAGAIN;
2637
2638	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2639		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
2640	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2641	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2642	if (rc)
2643		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2644
2645	qed_ptt_release(hwfn, ptt);
2646
2647	return rc;
2648}
2649
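/* Entry point for NVM flash requests (the .nvm_flash common op): validate
 * the batchfile header, then execute each command record in order,
 * optionally checking the MCP response after a command completes.
 */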
2650static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2651{
2652	const struct firmware *image;
2653	const u8 *data, *data_end;
2654	u32 cmd_type;
2655	int rc;
2656
2657	rc = request_firmware(&image, name, &cdev->pdev->dev);
2658	if (rc) {
2659		DP_ERR(cdev, "Failed to find '%s'\n", name);
2660		return rc;
2661	}
2662
2663	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2664		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
2665		   name, image->data, (u32)image->size);
2666	data = image->data;
2667	data_end = data + image->size;
2668
2669	rc = qed_nvm_flash_image_validate(cdev, image, &data);
2670	if (rc)
2671		goto exit;
2672
2673	while (data < data_end) {
2674		bool check_resp = false;
2675
2676		/* Parse the actual command */
2677		cmd_type = *((u32 *)data);
2678		switch (cmd_type) {
2679		case QED_NVM_FLASH_CMD_FILE_DATA:
2680			rc = qed_nvm_flash_image_file_data(cdev, &data,
2681							   &check_resp);
2682			break;
2683		case QED_NVM_FLASH_CMD_FILE_START:
2684			rc = qed_nvm_flash_image_file_start(cdev, &data,
2685							    &check_resp);
2686			break;
2687		case QED_NVM_FLASH_CMD_NVM_CHANGE:
2688			rc = qed_nvm_flash_image_access(cdev, &data,
2689							&check_resp);
2690			break;
2691		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
2692			rc = qed_nvm_flash_cfg_write(cdev, &data);
2693			break;
2694		default:
2695			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2696			rc = -EINVAL;
2697			goto exit;
2698		}
2699
2700		if (rc) {
2701			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2702			goto exit;
2703		}
2704
2705		/* Check response if needed */
2706		if (check_resp) {
2707			u32 mcp_response = 0;
2708
2709			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2710				DP_ERR(cdev, "Failed getting MCP response\n");
2711				rc = -EINVAL;
2712				goto exit;
2713			}
2714
2715			switch (mcp_response & FW_MSG_CODE_MASK) {
2716			case FW_MSG_CODE_OK:
2717			case FW_MSG_CODE_NVM_OK:
2718			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2719			case FW_MSG_CODE_PHY_OK:
2720				break;
2721			default:
2722				DP_ERR(cdev, "MFW returns error: %08x\n",
2723				       mcp_response);
2724				rc = -EINVAL;
2725				goto exit;
2726			}
2727		}
2728	}
2729
2730exit:
2731	release_firmware(image);
2732
2733	return rc;
2734}
2735
2736static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2737			     u8 *buf, u16 len)
2738{
2739	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2740
2741	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2742}
2743
2744void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2745{
2746	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2747	void *cookie = p_hwfn->cdev->ops_cookie;
2748
2749	if (ops && ops->schedule_recovery_handler)
2750		ops->schedule_recovery_handler(cookie);
2751}
2752
2753static const char * const qed_hw_err_type_descr[] = {
2754	[QED_HW_ERR_FAN_FAIL]		= "Fan Failure",
2755	[QED_HW_ERR_MFW_RESP_FAIL]	= "MFW Response Failure",
2756	[QED_HW_ERR_HW_ATTN]		= "HW Attention",
2757	[QED_HW_ERR_DMAE_FAIL]		= "DMAE Failure",
2758	[QED_HW_ERR_RAMROD_FAIL]	= "Ramrod Failure",
2759	[QED_HW_ERR_FW_ASSERT]		= "FW Assertion",
2760	[QED_HW_ERR_LAST]		= "Unknown",
2761};
2762
2763void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
2764			   enum qed_hw_err_type err_type)
2765{
2766	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2767	void *cookie = p_hwfn->cdev->ops_cookie;
2768	const char *err_str;
2769
2770	if (err_type > QED_HW_ERR_LAST)
2771		err_type = QED_HW_ERR_LAST;
2772	err_str = qed_hw_err_type_descr[err_type];
2773
2774	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
2775
2776	/* Call the HW error handler of the protocol driver.
2777	 * If it is not available, perform minimal handling by preventing
2778	 * HW attentions from being reasserted.
2779	 */
2780	if (ops && ops->schedule_hw_err_handler)
2781		ops->schedule_hw_err_handler(cookie, err_type);
2782	else
2783		qed_int_attn_clr_enable(p_hwfn->cdev, true);
2784}
2785
2786static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2787			    void *handle)
2788{
2789	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2790}
2791
2792static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2793{
2794	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2795	struct qed_ptt *ptt;
2796	int status = 0;
2797
2798	ptt = qed_ptt_acquire(hwfn);
2799	if (!ptt)
2800		return -EAGAIN;
2801
2802	status = qed_mcp_set_led(hwfn, ptt, mode);
2803
2804	qed_ptt_release(hwfn, ptt);
2805
2806	return status;
2807}
2808
2809int qed_recovery_process(struct qed_dev *cdev)
2810{
2811	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2812	struct qed_ptt *p_ptt;
2813	int rc = 0;
2814
2815	p_ptt = qed_ptt_acquire(p_hwfn);
2816	if (!p_ptt)
2817		return -EAGAIN;
2818
2819	rc = qed_start_recovery_process(p_hwfn, p_ptt);
2820
2821	qed_ptt_release(p_hwfn, p_ptt);
2822
2823	return rc;
2824}
2825
2826static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2827{
2828	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2829	struct qed_ptt *ptt;
2830	int rc = 0;
2831
2832	if (IS_VF(cdev))
2833		return 0;
2834
2835	ptt = qed_ptt_acquire(hwfn);
2836	if (!ptt)
2837		return -EAGAIN;
2838
2839	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2840				   : QED_OV_WOL_DISABLED);
2841	if (rc)
2842		goto out;
2843	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2844
2845out:
2846	qed_ptt_release(hwfn, ptt);
2847	return rc;
2848}
2849
2850static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2851{
2852	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2853	struct qed_ptt *ptt;
2854	int status = 0;
2855
2856	if (IS_VF(cdev))
2857		return 0;
2858
2859	ptt = qed_ptt_acquire(hwfn);
2860	if (!ptt)
2861		return -EAGAIN;
2862
2863	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2864						QED_OV_DRIVER_STATE_ACTIVE :
2865						QED_OV_DRIVER_STATE_DISABLED);
2866
2867	qed_ptt_release(hwfn, ptt);
2868
2869	return status;
2870}
2871
2872static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
2873{
2874	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2875	struct qed_ptt *ptt;
2876	int status = 0;
2877
2878	if (IS_VF(cdev))
2879		return 0;
2880
2881	ptt = qed_ptt_acquire(hwfn);
2882	if (!ptt)
2883		return -EAGAIN;
2884
2885	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2886	if (status)
2887		goto out;
2888
2889	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2890
2891out:
2892	qed_ptt_release(hwfn, ptt);
2893	return status;
2894}
2895
2896static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2897{
2898	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2899	struct qed_ptt *ptt;
2900	int status = 0;
2901
2902	if (IS_VF(cdev))
2903		return 0;
2904
2905	ptt = qed_ptt_acquire(hwfn);
2906	if (!ptt)
2907		return -EAGAIN;
2908
2909	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2910	if (status)
2911		goto out;
2912
2913	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2914
2915out:
2916	qed_ptt_release(hwfn, ptt);
2917	return status;
2918}
2919
2920static int
2921qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb,
2922		u16 qid, struct qed_sb_info_dbg *sb_dbg)
2923{
2924	struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
2925	struct qed_ptt *ptt;
2926	int rc;
2927
2928	if (IS_VF(cdev))
2929		return -EINVAL;
2930
2931	ptt = qed_ptt_acquire(hwfn);
2932	if (!ptt) {
2933		DP_NOTICE(hwfn, "Can't acquire PTT\n");
2934		return -EAGAIN;
2935	}
2936
2937	memset(sb_dbg, 0, sizeof(*sb_dbg));
2938	rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
2939
2940	qed_ptt_release(hwfn, ptt);
2941	return rc;
2942}
2943
2944static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2945				  u8 dev_addr, u32 offset, u32 len)
2946{
2947	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2948	struct qed_ptt *ptt;
2949	int rc = 0;
2950
2951	if (IS_VF(cdev))
2952		return 0;
2953
2954	ptt = qed_ptt_acquire(hwfn);
2955	if (!ptt)
2956		return -EAGAIN;
2957
2958	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2959				  offset, len, buf);
2960
2961	qed_ptt_release(hwfn, ptt);
2962
2963	return rc;
2964}
2965
2966static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
2967{
2968	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2969	struct qed_ptt *ptt;
2970	int rc = 0;
2971
2972	if (IS_VF(cdev))
2973		return 0;
2974
2975	ptt = qed_ptt_acquire(hwfn);
2976	if (!ptt)
2977		return -EAGAIN;
2978
2979	rc = qed_dbg_grc_config(hwfn, cfg_id, val);
2980
2981	qed_ptt_release(hwfn, ptt);
2982
2983	return rc;
2984}
2985
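/* Format a message and forward it to the MFW as raw debug data (PF only). */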
2986static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...)
2987{
2988	char buf[QED_MFW_REPORT_STR_SIZE];
2989	struct qed_hwfn *p_hwfn;
2990	struct qed_ptt *p_ptt;
2991	va_list vl;
2992
2993	va_start(vl, fmt);
2994	vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
2995	va_end(vl);
2996
2997	if (IS_PF(cdev)) {
2998		p_hwfn = QED_LEADING_HWFN(cdev);
2999		p_ptt = qed_ptt_acquire(p_hwfn);
3000		if (p_ptt) {
3001			qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf));
3002			qed_ptt_release(p_hwfn, p_ptt);
3003		}
3004	}
3005}
3006
3007static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
3008{
3009	return QED_AFFIN_HWFN_IDX(cdev);
3010}
3011
3012static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active)
3013{
3014	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
3015	struct qed_ptt *ptt;
3016	int rc = 0;
3017
3018	*esl_active = false;
3019
3020	if (IS_VF(cdev))
3021		return 0;
3022
3023	ptt = qed_ptt_acquire(hwfn);
3024	if (!ptt)
3025		return -EAGAIN;
3026
3027	rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);
3028
3029	qed_ptt_release(hwfn, ptt);
3030
3031	return rc;
3032}
3033
3034static struct qed_selftest_ops qed_selftest_ops_pass = {
3035	.selftest_memory = &qed_selftest_memory,
3036	.selftest_interrupt = &qed_selftest_interrupt,
3037	.selftest_register = &qed_selftest_register,
3038	.selftest_clock = &qed_selftest_clock,
3039	.selftest_nvram = &qed_selftest_nvram,
3040};
3041
3042const struct qed_common_ops qed_common_ops_pass = {
3043	.selftest = &qed_selftest_ops_pass,
3044	.probe = &qed_probe,
3045	.remove = &qed_remove,
3046	.set_power_state = &qed_set_power_state,
3047	.set_name = &qed_set_name,
3048	.update_pf_params = &qed_update_pf_params,
3049	.slowpath_start = &qed_slowpath_start,
3050	.slowpath_stop = &qed_slowpath_stop,
3051	.set_fp_int = &qed_set_int_fp,
3052	.get_fp_int = &qed_get_int_fp,
3053	.sb_init = &qed_sb_init,
3054	.sb_release = &qed_sb_release,
3055	.simd_handler_config = &qed_simd_handler_config,
3056	.simd_handler_clean = &qed_simd_handler_clean,
3057	.dbg_grc = &qed_dbg_grc,
3058	.dbg_grc_size = &qed_dbg_grc_size,
3059	.can_link_change = &qed_can_link_change,
3060	.set_link = &qed_set_link,
3061	.get_link = &qed_get_current_link,
3062	.drain = &qed_drain,
3063	.update_msglvl = &qed_init_dp,
3064	.devlink_register = qed_devlink_register,
3065	.devlink_unregister = qed_devlink_unregister,
3066	.report_fatal_error = qed_report_fatal_error,
3067	.dbg_all_data = &qed_dbg_all_data,
3068	.dbg_all_data_size = &qed_dbg_all_data_size,
3069	.chain_alloc = &qed_chain_alloc,
3070	.chain_free = &qed_chain_free,
3071	.nvm_flash = &qed_nvm_flash,
3072	.nvm_get_image = &qed_nvm_get_image,
3073	.set_coalesce = &qed_set_coalesce,
3074	.set_led = &qed_set_led,
3075	.recovery_process = &qed_recovery_process,
3076	.recovery_prolog = &qed_recovery_prolog,
3077	.attn_clr_enable = &qed_int_attn_clr_enable,
3078	.update_drv_state = &qed_update_drv_state,
3079	.update_mac = &qed_update_mac,
3080	.update_mtu = &qed_update_mtu,
3081	.update_wol = &qed_update_wol,
3082	.db_recovery_add = &qed_db_recovery_add,
3083	.db_recovery_del = &qed_db_recovery_del,
3084	.read_module_eeprom = &qed_read_module_eeprom,
3085	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
3086	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
3087	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
3088	.set_grc_config = &qed_set_grc_config,
3089	.mfw_report = &qed_mfw_report,
3090	.get_sb_info = &qed_get_sb_info,
3091	.get_esl_status = &qed_get_esl_status,
3092};
3093
3094void qed_get_protocol_stats(struct qed_dev *cdev,
3095			    enum qed_mcp_protocol_type type,
3096			    union qed_mcp_protocol_stats *stats)
3097{
3098	struct qed_eth_stats eth_stats;
3099
3100	memset(stats, 0, sizeof(*stats));
3101
3102	switch (type) {
3103	case QED_MCP_LAN_STATS:
3104		qed_get_vport_stats(cdev, &eth_stats);
3105		stats->lan_stats.ucast_rx_pkts =
3106					eth_stats.common.rx_ucast_pkts;
3107		stats->lan_stats.ucast_tx_pkts =
3108					eth_stats.common.tx_ucast_pkts;
3109		stats->lan_stats.fcs_err = -1;
3110		break;
3111	case QED_MCP_FCOE_STATS:
3112		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
3113		break;
3114	case QED_MCP_ISCSI_STATS:
3115		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
3116		break;
3117	default:
3118		DP_VERBOSE(cdev, QED_MSG_SP,
3119			   "Invalid protocol type = %d\n", type);
3120		return;
3121	}
3122}
3123
3124int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
3125{
3126	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
3127		   "Scheduling slowpath task [Flag: %d]\n",
3128		   QED_SLOWPATH_MFW_TLV_REQ);
3129	/* Memory barrier for setting atomic bit */
3130	smp_mb__before_atomic();
3131	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
3132	/* Memory barrier after setting atomic bit */
3133	smp_mb__after_atomic();
3134	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
3135
3136	return 0;
3137}
3138
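/* Populate the generic MFW TLV: feature flags and MAC addresses supplied by
 * the protocol driver, plus aggregate Rx/Tx frame and byte counters taken
 * from the vport statistics.
 */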
3139static void
3140qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
3141{
3142	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
3143	struct qed_eth_stats_common *p_common;
3144	struct qed_generic_tlvs gen_tlvs;
3145	struct qed_eth_stats stats;
3146	int i;
3147
3148	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
3149	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
3150
3151	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
3152		tlv->flags.ipv4_csum_offload = true;
3153	if (gen_tlvs.feat_flags & QED_TLV_LSO)
3154		tlv->flags.lso_supported = true;
3155	tlv->flags.b_set = true;
3156
3157	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
3158		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
3159			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
3160			tlv->mac_set[i] = true;
3161		}
3162	}
3163
3164	qed_get_vport_stats(cdev, &stats);
3165	p_common = &stats.common;
3166	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
3167			 p_common->rx_bcast_pkts;
3168	tlv->rx_frames_set = true;
3169	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
3170			p_common->rx_bcast_bytes;
3171	tlv->rx_bytes_set = true;
3172	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
3173			 p_common->tx_bcast_pkts;
3174	tlv->tx_frames_set = true;
3175	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
3176			p_common->tx_bcast_bytes;
3177	tlv->tx_bytes_set = true;
3178}
3179
3180int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
3181			  union qed_mfw_tlv_data *tlv_buf)
3182{
3183	struct qed_dev *cdev = hwfn->cdev;
3184	struct qed_common_cb_ops *ops;
3185
3186	ops = cdev->protocol_ops.common;
3187	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
3188		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
3189		return -EINVAL;
3190	}
3191
3192	switch (type) {
3193	case QED_MFW_TLV_GENERIC:
3194		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
3195		break;
3196	case QED_MFW_TLV_ETH:
3197		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
3198		break;
3199	case QED_MFW_TLV_FCOE:
3200		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
3201		break;
3202	case QED_MFW_TLV_ISCSI:
3203		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
3204		break;
3205	default:
3206		break;
3207	}
3208
3209	return 0;
3210}
3211
3212unsigned long qed_get_epoch_time(void)
3213{
3214	return ktime_get_real_seconds();
3215}