Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
   5 */
   6
   7#include <linux/stddef.h>
   8#include <linux/pci.h>
   9#include <linux/kernel.h>
  10#include <linux/slab.h>
  11#include <linux/delay.h>
  12#include <asm/byteorder.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/string.h>
  15#include <linux/module.h>
  16#include <linux/interrupt.h>
  17#include <linux/workqueue.h>
  18#include <linux/ethtool.h>
  19#include <linux/etherdevice.h>
  20#include <linux/vmalloc.h>
  21#include <linux/crash_dump.h>
  22#include <linux/crc32.h>
  23#include <linux/qed/qed_if.h>
  24#include <linux/qed/qed_ll2_if.h>
  25#include <net/devlink.h>
  26#include <linux/phylink.h>
  27
  28#include "qed.h"
  29#include "qed_sriov.h"
  30#include "qed_sp.h"
  31#include "qed_dev_api.h"
  32#include "qed_ll2.h"
  33#include "qed_fcoe.h"
  34#include "qed_iscsi.h"
  35
  36#include "qed_mcp.h"
  37#include "qed_reg_addr.h"
  38#include "qed_hw.h"
  39#include "qed_selftest.h"
  40#include "qed_debug.h"
  41#include "qed_devlink.h"
  42
  43#define QED_ROCE_QPS			(8192)
  44#define QED_ROCE_DPIS			(8)
  45#define QED_RDMA_SRQS                   QED_ROCE_QPS
  46#define QED_NVM_CFG_GET_FLAGS		0xA
  47#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
  48#define QED_NVM_CFG_MAX_ATTRS		50
  49
  50static char version[] =
  51	"QLogic FastLinQ 4xxxx Core Module qed\n";
  52
  53MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
  54MODULE_LICENSE("GPL");
 
  55
  56#define FW_FILE_VERSION				\
  57	__stringify(FW_MAJOR_VERSION) "."	\
  58	__stringify(FW_MINOR_VERSION) "."	\
  59	__stringify(FW_REVISION_VERSION) "."	\
  60	__stringify(FW_ENGINEERING_VERSION)
  61
  62#define QED_FW_FILE_NAME	\
  63	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
  64
  65MODULE_FIRMWARE(QED_FW_FILE_NAME);
  66
  67/* MFW speed capabilities maps */
  68
/* Association between one MFW speed value and the set of ethtool link modes
 * it implies. The raw bit list in @cap_arr is folded into the @caps linkmode
 * mask once at module init (see qed_mfw_speed_map_populate()), after which
 * the array reference is dropped.
 */
struct qed_mfw_speed_map {
	u32		mfw_val;	/* raw speed value reported by the MFW */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);	/* populated at init time */

	const u32	*cap_arr;	/* init-only list of link-mode bit numbers */
	u32		arr_size;	/* number of entries in @cap_arr */
};

/* Build a speed-map entry from an MFW value and an ETHTOOL_LINK_MODE array. */
#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}
  83
/* Per-speed ETHTOOL_LINK_MODE bit lists for the "extended" MFW speed
 * advertisement interface. __initconst: consumed exactly once by
 * qed_mfw_speed_maps_init() and discarded after module init.
 */
static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

/* MFW extended-speed value -> linkmode mask table. __ro_after_init because
 * the caps masks are written once during init and read-only afterwards.
 */
static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};
 157
/* Per-speed ETHTOOL_LINK_MODE bit lists for the legacy NVM_CFG1 speed
 * capability interface. Same init-time-only lifecycle as the ext tables.
 */
static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

/* 100G list is BB-specific (see the _BB_100G mask it is mapped from). */
static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

/* Legacy NVM_CFG1 speed-capability mask -> linkmode mask table. */
static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};
 221
 222static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
 223{
 224	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
 225
 226	map->cap_arr = NULL;
 227	map->arr_size = 0;
 228}
 229
 230static void __init qed_mfw_speed_maps_init(void)
 231{
 232	u32 i;
 233
 234	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
 235		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);
 236
 237	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
 238		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
 239}
 240
/* Module entry point: print the version banner and collapse the MFW
 * speed-map bit arrays into linkmode masks. Always succeeds.
 */
static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
 249module_init(qed_init);
 250
/* Module exit hook; intentionally empty. Its mere presence is what allows
 * the module to be unloaded.
 */
static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
 255module_exit(qed_exit);
 256
 257static void qed_free_pci(struct qed_dev *cdev)
 258{
 259	struct pci_dev *pdev = cdev->pdev;
 260
 261	if (cdev->doorbells && cdev->db_size)
 262		iounmap(cdev->doorbells);
 263	if (cdev->regview)
 264		iounmap(cdev->regview);
 265	if (atomic_read(&pdev->enable_cnt) == 1)
 266		pci_release_regions(pdev);
 267
 268	pci_disable_device(pdev);
 269}
 270
 271#define PCI_REVISION_ID_ERROR_VAL	0xff
 272
 273/* Performs PCI initializations as well as initializing PCI-related parameters
 274 * in the device structrue. Returns 0 in case of success.
 275 */
 276static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
 277{
 278	u8 rev_id;
 279	int rc;
 280
 281	cdev->pdev = pdev;
 282
 283	rc = pci_enable_device(pdev);
 284	if (rc) {
 285		DP_NOTICE(cdev, "Cannot enable PCI device\n");
 286		goto err0;
 287	}
 288
 289	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 290		DP_NOTICE(cdev, "No memory region found in bar #0\n");
 291		rc = -EIO;
 292		goto err1;
 293	}
 294
 295	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 296		DP_NOTICE(cdev, "No memory region found in bar #2\n");
 297		rc = -EIO;
 298		goto err1;
 299	}
 300
 301	if (atomic_read(&pdev->enable_cnt) == 1) {
 302		rc = pci_request_regions(pdev, "qed");
 303		if (rc) {
 304			DP_NOTICE(cdev,
 305				  "Failed to request PCI memory resources\n");
 306			goto err1;
 307		}
 308		pci_set_master(pdev);
 309		pci_save_state(pdev);
 310	}
 311
 312	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
 313	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
 314		DP_NOTICE(cdev,
 315			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
 316			  rev_id);
 317		rc = -ENODEV;
 318		goto err2;
 319	}
 320	if (!pci_is_pcie(pdev)) {
 321		DP_NOTICE(cdev, "The bus is not PCI Express\n");
 322		rc = -EIO;
 323		goto err2;
 324	}
 325
 326	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 327	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
 328		DP_NOTICE(cdev, "Cannot find power management capability\n");
 329
 330	rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
 331	if (rc) {
 332		DP_NOTICE(cdev, "Can't request DMA addresses\n");
 333		rc = -EIO;
 334		goto err2;
 335	}
 336
 337	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
 338	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
 339	cdev->pci_params.irq = pdev->irq;
 340
 341	cdev->regview = pci_ioremap_bar(pdev, 0);
 342	if (!cdev->regview) {
 343		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
 344		rc = -ENOMEM;
 345		goto err2;
 346	}
 347
 348	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
 349	cdev->db_size = pci_resource_len(cdev->pdev, 2);
 350	if (!cdev->db_size) {
 351		if (IS_PF(cdev)) {
 352			DP_NOTICE(cdev, "No Doorbell bar available\n");
 353			return -EINVAL;
 354		} else {
 355			return 0;
 356		}
 357	}
 358
 359	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
 360
 361	if (!cdev->doorbells) {
 362		DP_NOTICE(cdev, "Cannot map doorbell space\n");
 363		return -ENOMEM;
 364	}
 365
 366	return 0;
 367
 368err2:
 369	pci_release_regions(pdev);
 370err1:
 371	pci_disable_device(pdev);
 372err0:
 373	return rc;
 374}
 375
 376int qed_fill_dev_info(struct qed_dev *cdev,
 377		      struct qed_dev_info *dev_info)
 378{
 379	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 380	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
 381	struct qed_tunnel_info *tun = &cdev->tunnel;
 382	struct qed_ptt  *ptt;
 383
 384	memset(dev_info, 0, sizeof(struct qed_dev_info));
 385
 386	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
 387	    tun->vxlan.b_mode_enabled)
 388		dev_info->vxlan_enable = true;
 389
 390	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
 391	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
 392	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
 393		dev_info->gre_enable = true;
 394
 395	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
 396	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
 397	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
 398		dev_info->geneve_enable = true;
 399
 400	dev_info->num_hwfns = cdev->num_hwfns;
 401	dev_info->pci_mem_start = cdev->pci_params.mem_start;
 402	dev_info->pci_mem_end = cdev->pci_params.mem_end;
 403	dev_info->pci_irq = cdev->pci_params.irq;
 404	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
 405	dev_info->dev_type = cdev->type;
 406	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
 407
 408	if (IS_PF(cdev)) {
 409		dev_info->fw_major = FW_MAJOR_VERSION;
 410		dev_info->fw_minor = FW_MINOR_VERSION;
 411		dev_info->fw_rev = FW_REVISION_VERSION;
 412		dev_info->fw_eng = FW_ENGINEERING_VERSION;
 413		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
 414						       &cdev->mf_bits);
 415		if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
 416			dev_info->b_arfs_capable = true;
 417		dev_info->tx_switching = true;
 418
 419		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
 420			dev_info->wol_support = true;
 421
 422		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
 423		dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
 424		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
 425	} else {
 426		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
 427				      &dev_info->fw_minor, &dev_info->fw_rev,
 428				      &dev_info->fw_eng);
 429	}
 430
 431	if (IS_PF(cdev)) {
 432		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
 433		if (ptt) {
 434			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
 435					    &dev_info->mfw_rev, NULL);
 436
 437			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
 438					    &dev_info->mbi_version);
 439
 440			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
 441					       &dev_info->flash_size);
 442
 443			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
 444		}
 445	} else {
 446		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
 447				    &dev_info->mfw_rev, NULL);
 448	}
 449
 450	dev_info->mtu = hw_info->mtu;
 451	cdev->common_dev_info = *dev_info;
 452
 453	return 0;
 454}
 455
 456static void qed_free_cdev(struct qed_dev *cdev)
 457{
 458	kfree((void *)cdev);
 459}
 460
 461static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
 462{
 463	struct qed_dev *cdev;
 464
 465	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
 466	if (!cdev)
 467		return cdev;
 468
 469	qed_init_struct(cdev);
 470
 471	return cdev;
 472}
 473
 474/* Sets the requested power state */
 475static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
 476{
 477	if (!cdev)
 478		return -ENODEV;
 479
 480	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
 481	return 0;
 482}
 483
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* probing */
/* Allocate and initialize a qed_dev for @pdev: debug setup, PCI init and HW
 * preparation. Returns the new device on success, or NULL on any failure
 * (partially acquired resources are rolled back via the error labels).
 */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	/* Debug-print config must precede any DP_* usage below. */
	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "%s completed successfully\n", __func__);

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}
 529
/* Tear down a device created by qed_probe(): remove HW state, release PCI
 * resources, request D3hot (currently a logged no-op, see
 * qed_set_power_state()) and free the qed_dev. Safe on NULL.
 */
static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}
 543
 544static void qed_disable_msix(struct qed_dev *cdev)
 545{
 546	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 547		pci_disable_msix(cdev->pdev);
 548		kfree(cdev->int_params.msix_table);
 549	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
 550		pci_disable_msi(cdev->pdev);
 551	}
 552
 553	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
 554}
 555
/* Enable MSI-X for @cdev.
 *
 * Requests int_params->in.num_vectors vectors, accepting as few as
 * in.min_msix_cnt. If fewer than requested were granted and the count is
 * not a multiple of the hwfn count, the allocation is retried rounded down
 * to such a multiple (fastpath vectors must spread evenly across hwfns).
 *
 * On success fills int_params->out and returns 0; otherwise returns the
 * (negative or unexpected) result of the PCI MSI-X enable calls.
 */
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;	/* normalize to "vectors granted" */
	}

	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of msix vectors as we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * this VF.
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
 604
/* This function outputs the int mode and the number of enabled msix vector */
/* Resolve the interrupt mode for @cdev. Starting from the requested
 * in.int_mode, each case deliberately falls through to the next-weaker
 * mode on failure unless @force_mode is set (then failure of the requested
 * mode is returned as-is). MSI is only attempted for single-hwfn devices.
 */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
			/* Legacy INTx always "succeeds". */
			int_params->out.int_mode = QED_INT_MODE_INTA;
			rc = 0;
			goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
 667
 668static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
 669				    int index, void(*handler)(void *))
 670{
 671	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
 672	int relative_idx = index / cdev->num_hwfns;
 673
 674	hwfn->simd_proto_handler[relative_idx].func = handler;
 675	hwfn->simd_proto_handler[relative_idx].token = token;
 676}
 677
 678static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
 679{
 680	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
 681	int relative_idx = index / cdev->num_hwfns;
 682
 683	memset(&hwfn->simd_proto_handler[relative_idx], 0,
 684	       sizeof(struct qed_simd_fp_handler));
 685}
 686
 687static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
 688{
 689	tasklet_schedule((struct tasklet_struct *)tasklet);
 690	return IRQ_HANDLED;
 691}
 692
/* Shared ISR for the single-interrupt modes (INTx / MSI).
 *
 * Reads each hwfn's SISR status: bit 0 is the slowpath indication (handled
 * by scheduling the sp tasklet); each higher bit j maps to fastpath handler
 * slot j via the (0x2ULL << j) test. Returns IRQ_HANDLED if any bit was
 * consumed, IRQ_NONE otherwise (needed for IRQF_SHARED lines).
 */
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(&hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		/* Any bits still set were not claimed by any handler. */
		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
 742
/* Request the slowpath interrupt for @hwfn.
 *
 * In MSI-X mode each hwfn gets its own vector (indexed by hwfn->my_id) and
 * the ISR just schedules the hwfn's sp tasklet. In MSI/INTx mode a single
 * device-wide handler (qed_single_int) is requested instead — IRQF_SHARED
 * only for legacy INTx. Returns request_irq()'s result.
 */
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		/* Name encodes hwfn id + PCI BDF for /proc/interrupts. */
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
 781
 782static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
 783{
 784	/* Calling the disable function will make sure that any
 785	 * currently-running function is completed. The following call to the
 786	 * enable function makes this sequence a flush-like operation.
 787	 */
 788	if (p_hwfn->b_sp_dpc_enabled) {
 789		tasklet_disable(&p_hwfn->sp_dpc);
 790		tasklet_enable(&p_hwfn->sp_dpc);
 791	}
 792}
 793
 794void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
 795{
 796	struct qed_dev *cdev = p_hwfn->cdev;
 797	u8 id = p_hwfn->my_id;
 798	u32 int_mode;
 799
 800	int_mode = cdev->int_params.out.int_mode;
 801	if (int_mode == QED_INT_MODE_MSIX)
 802		synchronize_irq(cdev->int_params.msix_table[id].vector);
 803	else
 804		synchronize_irq(cdev->pdev->irq);
 805
 806	qed_slowpath_tasklet_flush(p_hwfn);
 807}
 808
/* Free every requested slowpath IRQ and disable the post-ISR interrupt
 * state. In MSI-X mode one IRQ per hwfn was requested; the loop stops at
 * the first hwfn without b_int_requested set — this assumes requests were
 * made in hwfn order so the flags form a prefix (NOTE(review): confirm).
 */
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;

			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		/* Single shared line was requested with cdev as cookie. */
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}
 826
 827static int qed_nic_stop(struct qed_dev *cdev)
 828{
 829	int i, rc;
 830
 831	rc = qed_hw_stop(cdev);
 832
 833	for (i = 0; i < cdev->num_hwfns; i++) {
 834		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 835
 836		if (p_hwfn->b_sp_dpc_enabled) {
 837			tasklet_disable(&p_hwfn->sp_dpc);
 838			p_hwfn->b_sp_dpc_enabled = false;
 839			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
 840				   "Disabled sp tasklet [hwfn %d] at %p\n",
 841				   i, &p_hwfn->sp_dpc);
 842		}
 843	}
 844
 845	qed_dbg_pf_exit(cdev);
 846
 847	return rc;
 848}
 849
 850static int qed_nic_setup(struct qed_dev *cdev)
 851{
 852	int rc, i;
 853
 854	/* Determine if interface is going to require LL2 */
 855	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
 856		for (i = 0; i < cdev->num_hwfns; i++) {
 857			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 858
 859			p_hwfn->using_ll2 = true;
 860		}
 861	}
 862
 863	rc = qed_resc_alloc(cdev);
 864	if (rc)
 865		return rc;
 866
 867	DP_INFO(cdev, "Allocated qed resources\n");
 868
 869	qed_resc_setup(cdev);
 870
 871	return rc;
 872}
 873
 874static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
 875{
 876	int limit = 0;
 877
 878	/* Mark the fastpath as free/used */
 879	cdev->int_params.fp_initialized = cnt ? true : false;
 880
 881	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
 882		limit = cdev->num_hwfns * 63;
 883	else if (cdev->int_params.fp_msix_cnt)
 884		limit = cdev->int_params.fp_msix_cnt;
 885
 886	if (!limit)
 887		return -ENOMEM;
 888
 889	return min_t(int, cnt, limit);
 890}
 891
 892static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
 893{
 894	memset(info, 0, sizeof(struct qed_int_info));
 895
 896	if (!cdev->int_params.fp_initialized) {
 897		DP_INFO(cdev,
 898			"Protocol driver requested interrupt information, but its support is not yet configured\n");
 899		return -EINVAL;
 900	}
 901
 902	/* Need to expose only MSI-X information; Single IRQ is handled solely
 903	 * by qed.
 904	 */
 905	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 906		int msix_base = cdev->int_params.fp_msix_base;
 907
 908		info->msix_cnt = cdev->int_params.fp_msix_cnt;
 909		info->msix = &cdev->int_params.msix_table[msix_base];
 910	}
 911
 912	return 0;
 913}
 914
/* Size and configure PF interrupts for slowpath + fastpath (+ RDMA).
 *
 * Sums the status-block count of every hwfn plus one slowpath vector each,
 * clamps to the minimum in kdump kernels, resolves the interrupt mode, and
 * then partitions the granted MSI-X vectors: slowpath first (one per hwfn),
 * then L2 fastpath, then — for RDMA personalities — whatever remains above
 * the L2 queue count is split per-hwfn for RDMA.
 */
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc)  {
		DP_ERR(cdev, "%s ERR\n", __func__);
		return rc;
	}

	/* Slowpath vectors occupy the first num_hwfns slots; the rest is
	 * the fastpath budget.
	 */
	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	/* RDMA gets the surplus above the L2 queue count, split per hwfn. */
	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
 986
 987static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
 988{
 989	int rc;
 990
 991	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
 992	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
 993
 994	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
 995			    &cdev->int_params.in.num_vectors);
 996	if (cdev->num_hwfns > 1) {
 997		u8 vectors = 0;
 998
 999		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
1000		cdev->int_params.in.num_vectors += vectors;
1001	}
1002
1003	/* We want a minimum of one fastpath vector per vf hwfn */
1004	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
1005
1006	rc = qed_set_int_mode(cdev, true);
1007	if (rc)
1008		return rc;
1009
1010	cdev->int_params.fp_msix_base = 0;
1011	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
1012
1013	return 0;
1014}
1015
/* Inflate a zlib-compressed buffer (e.g. zipped firmware init values).
 *
 * @input_buf/@input_len: compressed data. @unzip_buf/@max_size: output
 * buffer. Uses the per-hwfn z_stream allocated by qed_alloc_stream_mem().
 *
 * Return: number of 32-bit dwords written (total_out / 4), or 0 on any
 * zlib error (errors are only logged at NETIF_MSG_DRV verbosity).
 */
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	/* Z_FINISH: the whole input is available, inflate in one shot. */
	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	/* Z_STREAM_END is the normal completion status for Z_FINISH. */
	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
1045
1046static int qed_alloc_stream_mem(struct qed_dev *cdev)
1047{
1048	int i;
1049	void *workspace;
1050
1051	for_each_hwfn(cdev, i) {
1052		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1053
1054		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
1055		if (!p_hwfn->stream)
1056			return -ENOMEM;
1057
1058		workspace = vzalloc(zlib_inflate_workspacesize());
1059		if (!workspace)
1060			return -ENOMEM;
1061		p_hwfn->stream->workspace = workspace;
1062	}
1063
1064	return 0;
1065}
1066
1067static void qed_free_stream_mem(struct qed_dev *cdev)
1068{
1069	int i;
1070
1071	for_each_hwfn(cdev, i) {
1072		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1073
1074		if (!p_hwfn->stream)
1075			return;
1076
1077		vfree(p_hwfn->stream->workspace);
1078		kfree(p_hwfn->stream);
1079	}
1080}
1081
1082static void qed_update_pf_params(struct qed_dev *cdev,
1083				 struct qed_pf_params *params)
1084{
1085	int i;
1086
1087	if (IS_ENABLED(CONFIG_QED_RDMA)) {
1088		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
1089		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
1090		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
1091		/* divide by 3 the MRs to avoid MF ILT overflow */
1092		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
1093	}
1094
1095	if (cdev->num_hwfns > 1 || IS_VF(cdev))
1096		params->eth_pf_params.num_arfs_filters = 0;
1097
1098	/* In case we might support RDMA, don't allow qede to be greedy
1099	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
1100	 * per hwfn.
1101	 */
1102	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
1103		u16 *num_cons;
1104
1105		num_cons = &params->eth_pf_params.num_cons;
1106		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
1107	}
1108
1109	for (i = 0; i < cdev->num_hwfns; i++) {
1110		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1111
1112		p_hwfn->pf_params = *params;
1113	}
1114}
1115
1116#define QED_PERIODIC_DB_REC_COUNT		10
1117#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
1118#define QED_PERIODIC_DB_REC_INTERVAL \
1119	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
 
 
 
1120
1121static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
1122				     enum qed_slowpath_wq_flag wq_flag,
1123				     unsigned long delay)
1124{
1125	if (!hwfn->slowpath_wq_active)
1126		return -EINVAL;
1127
1128	/* Memory barrier for setting atomic bit */
1129	smp_mb__before_atomic();
1130	set_bit(wq_flag, &hwfn->slowpath_task_flags);
1131	/* Memory barrier after setting atomic bit */
1132	smp_mb__after_atomic();
1133	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
1134
1135	return 0;
1136}
1137
1138void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
1139{
1140	/* Reset periodic Doorbell Recovery counter */
1141	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
1142
1143	/* Don't schedule periodic Doorbell Recovery if already scheduled */
1144	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1145		     &p_hwfn->slowpath_task_flags))
1146		return;
1147
1148	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
1149				  QED_PERIODIC_DB_REC_INTERVAL);
1150}
1151
1152static void qed_slowpath_wq_stop(struct qed_dev *cdev)
1153{
1154	int i;
1155
1156	if (IS_VF(cdev))
1157		return;
1158
1159	for_each_hwfn(cdev, i) {
1160		if (!cdev->hwfns[i].slowpath_wq)
1161			continue;
1162
1163		/* Stop queuing new delayed works */
1164		cdev->hwfns[i].slowpath_wq_active = false;
1165
1166		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
 
 
 
 
 
 
1167		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
1168	}
1169}
1170
/* Slowpath delayed-work handler: services MFW TLV requests and the
 * periodic doorbell-recovery flag queued via qed_slowpath_delayed_work().
 */
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	/* No free PTT window right now - retry immediately, unless the
	 * workqueue is already being torn down.
	 */
	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		/* skip qed_db_rec_handler during recovery/unload */
		if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
			goto out;

		qed_db_rec_handler(hwfn, ptt);
		/* Re-arm while the iteration budget loaded by
		 * qed_periodic_db_rec_start() has not been exhausted.
		 */
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

out:
	qed_ptt_release(hwfn, ptt);
}
1205
1206static int qed_slowpath_wq_start(struct qed_dev *cdev)
1207{
1208	struct qed_hwfn *hwfn;
1209	char name[NAME_SIZE];
1210	int i;
1211
1212	if (IS_VF(cdev))
1213		return 0;
1214
1215	for_each_hwfn(cdev, i) {
1216		hwfn = &cdev->hwfns[i];
1217
1218		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
1219			 cdev->pdev->bus->number,
1220			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
1221
1222		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
1223		if (!hwfn->slowpath_wq) {
1224			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
1225			return -ENOMEM;
1226		}
1227
1228		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
1229		hwfn->slowpath_wq_active = true;
1230	}
1231
1232	return 0;
1233}
1234
/* Bring up the device slowpath: IOV and slowpath workqueues, firmware
 * request (PF only), NIC resources, interrupts, HW init, optional LL2
 * interface and driver-version reporting to the management FW.
 *
 * Error handling unwinds in strict reverse order through the err* labels;
 * keep any new failure point consistent with that ladder.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		/* On non-CMT devices hold a dedicated PTT for aRFS
		 * configuration; it is released in qed_slowpath_stop().
		 */
		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				rc = -EINVAL;
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	/* All tunnel classes enabled by default, MAC/VLAN classification */
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		/* Report the driver version to the management firmware */
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strscpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	/* release_firmware() is a no-op on a NULL pointer */
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
1394
/* Reverse of qed_slowpath_start(): stop workqueues, tear down LL2,
 * SR-IOV, the NIC, interrupts and resources, then drop the firmware
 * reference.  The order mirrors the start sequence in reverse.
 */
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		/* Release the aRFS PTT taken in qed_slowpath_start() */
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}
1429
1430static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
1431{
1432	int i;
1433
1434	memcpy(cdev->name, name, NAME_SIZE);
1435	for_each_hwfn(cdev, i)
1436		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1437}
1438
1439static u32 qed_sb_init(struct qed_dev *cdev,
1440		       struct qed_sb_info *sb_info,
1441		       void *sb_virt_addr,
1442		       dma_addr_t sb_phy_addr, u16 sb_id,
1443		       enum qed_sb_type type)
1444{
1445	struct qed_hwfn *p_hwfn;
1446	struct qed_ptt *p_ptt;
1447	u16 rel_sb_id;
1448	u32 rc;
1449
1450	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1451	if (type == QED_SB_TYPE_L2_QUEUE) {
1452		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1453		rel_sb_id = sb_id / cdev->num_hwfns;
1454	} else {
1455		p_hwfn = QED_AFFIN_HWFN(cdev);
1456		rel_sb_id = sb_id;
1457	}
1458
1459	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1460		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1461		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1462
1463	if (IS_PF(p_hwfn->cdev)) {
1464		p_ptt = qed_ptt_acquire(p_hwfn);
1465		if (!p_ptt)
1466			return -EBUSY;
1467
1468		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
1469				     sb_phy_addr, rel_sb_id);
1470		qed_ptt_release(p_hwfn, p_ptt);
1471	} else {
1472		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
1473				     sb_phy_addr, rel_sb_id);
1474	}
1475
1476	return rc;
1477}
1478
1479static u32 qed_sb_release(struct qed_dev *cdev,
1480			  struct qed_sb_info *sb_info,
1481			  u16 sb_id,
1482			  enum qed_sb_type type)
1483{
1484	struct qed_hwfn *p_hwfn;
1485	u16 rel_sb_id;
1486	u32 rc;
1487
1488	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1489	if (type == QED_SB_TYPE_L2_QUEUE) {
1490		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1491		rel_sb_id = sb_id / cdev->num_hwfns;
1492	} else {
1493		p_hwfn = QED_AFFIN_HWFN(cdev);
1494		rel_sb_id = sb_id;
1495	}
1496
1497	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1498		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1499		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1500
1501	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1502
1503	return rc;
1504}
1505
1506static bool qed_can_link_change(struct qed_dev *cdev)
1507{
1508	return true;
1509}
1510
/* Translate the driver link-parameter overrides in @params into the MFW
 * extended-speed fields (ext_speed, ext_fec_mode) of @link_params.  Only
 * used on adapters where qed_mcp_is_ext_speed_supported() is true.
 */
static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
				     const struct qed_link_params *params)
{
	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
	const struct qed_mfw_speed_map *map;
	u32 i;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		ext_speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		ext_speed->advertised_speeds = 0;

		/* Accumulate the MFW bit of every advertised link mode */
		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
			map = qed_mfw_ext_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				ext_speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
		/* Ethtool SPEED_* -> QED_EXT_SPEED_*; 50G and 100G select
		 * every lane variant the HW may use for that rate.
		 */
		switch (params->forced_speed) {
		case SPEED_1000:
			ext_speed->forced_speed = QED_EXT_SPEED_1G;
			break;
		case SPEED_10000:
			ext_speed->forced_speed = QED_EXT_SPEED_10G;
			break;
		case SPEED_20000:
			ext_speed->forced_speed = QED_EXT_SPEED_20G;
			break;
		case SPEED_25000:
			ext_speed->forced_speed = QED_EXT_SPEED_25G;
			break;
		case SPEED_40000:
			ext_speed->forced_speed = QED_EXT_SPEED_40G;
			break;
		case SPEED_50000:
			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
						  QED_EXT_SPEED_50G_R2;
			break;
		case SPEED_100000:
			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
						  QED_EXT_SPEED_100G_R4 |
						  QED_EXT_SPEED_100G_P4;
			break;
		default:
			break;
		}
	}

	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
		return;

	/* Forced-FEC encoding is rate specific; AUTO advertises every FEC
	 * scheme valid for the forced rate.
	 */
	switch (params->forced_speed) {
	case SPEED_25000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
						    ETH_EXT_FEC_25G_BASE_R |
						    ETH_EXT_FEC_25G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_40000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
						    ETH_EXT_FEC_40G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_50000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
						    ETH_EXT_FEC_50G_BASE_R |
						    ETH_EXT_FEC_50G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_100000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
						    ETH_EXT_FEC_100G_BASE_R |
						    ETH_EXT_FEC_100G_NONE;
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}
}
1651
1652static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1653{
1654	struct qed_mcp_link_params *link_params;
1655	struct qed_mcp_link_speed_params *speed;
1656	const struct qed_mfw_speed_map *map;
1657	struct qed_hwfn *hwfn;
 
1658	struct qed_ptt *ptt;
 
1659	int rc;
1660	u32 i;
1661
1662	if (!cdev)
1663		return -ENODEV;
1664
1665	/* The link should be set only once per PF */
1666	hwfn = &cdev->hwfns[0];
1667
1668	/* When VF wants to set link, force it to read the bulletin instead.
1669	 * This mimics the PF behavior, where a noitification [both immediate
1670	 * and possible later] would be generated when changing properties.
1671	 */
1672	if (IS_VF(cdev)) {
1673		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
1674		return 0;
1675	}
1676
1677	ptt = qed_ptt_acquire(hwfn);
1678	if (!ptt)
1679		return -EBUSY;
1680
1681	link_params = qed_mcp_get_link_params(hwfn);
1682	if (!link_params)
1683		return -ENODATA;
1684
1685	speed = &link_params->speed;
1686
1687	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1688		speed->autoneg = !!params->autoneg;
1689
1690	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1691		speed->advertised_speeds = 0;
1692
1693		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
1694			map = qed_mfw_legacy_maps + i;
1695
1696			if (linkmode_intersects(params->adv_speeds, map->caps))
1697				speed->advertised_speeds |= map->mfw_val;
1698		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1699	}
1700
1701	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1702		speed->forced_speed = params->forced_speed;
1703
1704	if (qed_mcp_is_ext_speed_supported(hwfn))
1705		qed_set_ext_speed_params(link_params, params);
1706
1707	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1708		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1709			link_params->pause.autoneg = true;
1710		else
1711			link_params->pause.autoneg = false;
1712		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1713			link_params->pause.forced_rx = true;
1714		else
1715			link_params->pause.forced_rx = false;
1716		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1717			link_params->pause.forced_tx = true;
1718		else
1719			link_params->pause.forced_tx = false;
1720	}
1721
1722	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1723		switch (params->loopback_mode) {
1724		case QED_LINK_LOOPBACK_INT_PHY:
1725			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1726			break;
1727		case QED_LINK_LOOPBACK_EXT_PHY:
1728			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1729			break;
1730		case QED_LINK_LOOPBACK_EXT:
1731			link_params->loopback_mode = ETH_LOOPBACK_EXT;
1732			break;
1733		case QED_LINK_LOOPBACK_MAC:
1734			link_params->loopback_mode = ETH_LOOPBACK_MAC;
1735			break;
1736		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
1737			link_params->loopback_mode =
1738				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
1739			break;
1740		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
1741			link_params->loopback_mode =
1742				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
1743			break;
1744		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
1745			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
1746			break;
1747		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
1748			link_params->loopback_mode =
1749				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
1750			break;
1751		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
1752			link_params->loopback_mode =
1753				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
1754			break;
1755		default:
1756			link_params->loopback_mode = ETH_LOOPBACK_NONE;
1757			break;
1758		}
1759	}
1760
1761	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1762		memcpy(&link_params->eee, &params->eee,
1763		       sizeof(link_params->eee));
1764
1765	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
1766		link_params->fec = params->fec;
1767
1768	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1769
1770	qed_ptt_release(hwfn, ptt);
1771
1772	return rc;
1773}
1774
1775static int qed_get_port_type(u32 media_type)
1776{
1777	int port_type;
1778
1779	switch (media_type) {
1780	case MEDIA_SFPP_10G_FIBER:
1781	case MEDIA_SFP_1G_FIBER:
1782	case MEDIA_XFP_FIBER:
1783	case MEDIA_MODULE_FIBER:
 
1784		port_type = PORT_FIBRE;
1785		break;
1786	case MEDIA_DA_TWINAX:
1787		port_type = PORT_DA;
1788		break;
1789	case MEDIA_BASE_T:
1790		port_type = PORT_TP;
1791		break;
1792	case MEDIA_KR:
1793	case MEDIA_NOT_PRESENT:
1794		port_type = PORT_NONE;
1795		break;
1796	case MEDIA_UNSPECIFIED:
1797	default:
1798		port_type = PORT_OTHER;
1799		break;
1800	}
1801	return port_type;
1802}
1803
1804static int qed_get_link_data(struct qed_hwfn *hwfn,
1805			     struct qed_mcp_link_params *params,
1806			     struct qed_mcp_link_state *link,
1807			     struct qed_mcp_link_capabilities *link_caps)
1808{
1809	void *p;
1810
1811	if (!IS_PF(hwfn->cdev)) {
1812		qed_vf_get_link_params(hwfn, params);
1813		qed_vf_get_link_state(hwfn, link);
1814		qed_vf_get_link_caps(hwfn, link_caps);
1815
1816		return 0;
1817	}
1818
1819	p = qed_mcp_get_link_params(hwfn);
1820	if (!p)
1821		return -ENXIO;
1822	memcpy(params, p, sizeof(*params));
1823
1824	p = qed_mcp_get_link_state(hwfn);
1825	if (!p)
1826		return -ENXIO;
1827	memcpy(link, p, sizeof(*link));
1828
1829	p = qed_mcp_get_link_capabilities(hwfn);
1830	if (!p)
1831		return -ENXIO;
1832	memcpy(link_caps, p, sizeof(*link_caps));
1833
1834	return 0;
1835}
1836
/* Convert an NVM speed-capability bitmask (@capability) into ethtool link
 * modes in @if_caps, refined by the detected media and transceiver type.
 * MFW query failures fall back to permissive defaults so capability
 * reporting degrades gracefully.
 */
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	/* NOTE(review): on failure a transceiver _STATE_ constant is stored
	 * in tcvr_type; looks like a long-standing quirk - confirm before
	 * changing.
	 */
	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		/* Either an external PHY (fixed BASE-T port) or a pluggable
		 * BASE-T module - the board config distinguishes the two.
		 */
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		/* The transceiver type picks the exact fiber link mode for
		 * each advertised rate.
		 */
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}
2049
2050static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
2051{
2052	*speed_mask = 0;
2053
2054	if (caps &
2055	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
2056		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2057	if (caps & QED_LINK_PARTNER_SPEED_10G)
2058		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2059	if (caps & QED_LINK_PARTNER_SPEED_20G)
2060		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
2061	if (caps & QED_LINK_PARTNER_SPEED_25G)
2062		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2063	if (caps & QED_LINK_PARTNER_SPEED_40G)
2064		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2065	if (caps & QED_LINK_PARTNER_SPEED_50G)
2066		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
2067	if (caps & QED_LINK_PARTNER_SPEED_100G)
2068		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
2069}
2070
/* Build a link-status snapshot for the protocol driver.
 *
 * @hwfn:    HW function to query.
 * @ptt:     PTT window for MFW register access; NULL when running as a VF.
 * @if_link: output; zeroed first and left mostly zeroed if no link data
 *           could be obtained.
 */
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* Autoneg support/advertisement: MFWs with extended-speed support
	 * expose it through the ext_speed params, older ones through the
	 * legacy speed params.
	 */
	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);
	}

	/* Pause capabilities follow the configured pause parameters */
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		phylink_set(if_link->supported_caps, Pause);

	if_link->sup_fec = link_caps.fec_default;
	if_link->active_fec = params.fec;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link-partner pause/autoneg info from the negotiated link state */
	if (link.an_complete)
		phylink_set(if_link->lp_caps, Autoneg);

	if (link.partner_adv_pause)
		phylink_set(if_link->lp_caps, Pause);
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		phylink_set(if_link->lp_caps, Asym_Pause);

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
2180
2181static void qed_get_current_link(struct qed_dev *cdev,
2182				 struct qed_link_output *if_link)
2183{
2184	struct qed_hwfn *hwfn;
2185	struct qed_ptt *ptt;
2186	int i;
2187
2188	hwfn = &cdev->hwfns[0];
2189	if (IS_PF(cdev)) {
2190		ptt = qed_ptt_acquire(hwfn);
2191		if (ptt) {
2192			qed_fill_link(hwfn, ptt, if_link);
2193			qed_ptt_release(hwfn, ptt);
2194		} else {
2195			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
2196		}
2197	} else {
2198		qed_fill_link(hwfn, NULL, if_link);
2199	}
2200
2201	for_each_hwfn(cdev, i)
2202		qed_inform_vf_link_state(&cdev->hwfns[i]);
2203}
2204
2205void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2206{
2207	void *cookie = hwfn->cdev->ops_cookie;
2208	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2209	struct qed_link_output if_link;
2210
2211	qed_fill_link(hwfn, ptt, &if_link);
2212	qed_inform_vf_link_state(hwfn);
2213
2214	if (IS_LEAD_HWFN(hwfn) && cookie)
2215		op->link_update(cookie, &if_link);
2216}
2217
2218void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
2219{
2220	void *cookie = hwfn->cdev->ops_cookie;
2221	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
2222
2223	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
2224		op->bw_update(cookie);
2225}
2226
2227static int qed_drain(struct qed_dev *cdev)
2228{
2229	struct qed_hwfn *hwfn;
2230	struct qed_ptt *ptt;
2231	int i, rc;
2232
2233	if (IS_VF(cdev))
2234		return 0;
2235
2236	for_each_hwfn(cdev, i) {
2237		hwfn = &cdev->hwfns[i];
2238		ptt = qed_ptt_acquire(hwfn);
2239		if (!ptt) {
2240			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
2241			return -EBUSY;
2242		}
2243		rc = qed_mcp_drain(hwfn, ptt);
2244		qed_ptt_release(hwfn, ptt);
2245		if (rc)
2246			return rc;
2247	}
2248
2249	return 0;
2250}
2251
2252static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
2253					  struct qed_nvm_image_att *nvm_image,
2254					  u32 *crc)
2255{
2256	u8 *buf = NULL;
2257	int rc;
 
2258
2259	/* Allocate a buffer for holding the nvram image */
2260	buf = kzalloc(nvm_image->length, GFP_KERNEL);
2261	if (!buf)
2262		return -ENOMEM;
2263
2264	/* Read image into buffer */
2265	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
2266			      buf, nvm_image->length);
2267	if (rc) {
2268		DP_ERR(cdev, "Failed reading image from nvm\n");
2269		goto out;
2270	}
2271
2272	/* Convert the buffer into big-endian format (excluding the
2273	 * closing 4 bytes of CRC).
2274	 */
2275	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
2276			  DIV_ROUND_UP(nvm_image->length - 4, 4));
 
 
2277
2278	/* Calc CRC for the "actual" image buffer, i.e. not including
2279	 * the last 4 CRC bytes.
2280	 */
2281	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
2282	*crc = (__force u32)cpu_to_be32p(crc);
2283
2284out:
2285	kfree(buf);
2286
2287	return rc;
2288}
2289
2290/* Binary file format -
2291 *     /----------------------------------------------------------------------\
2292 * 0B  |                       0x4 [command index]                            |
2293 * 4B  | image_type     | Options        |  Number of register settings       |
2294 * 8B  |                       Value                                          |
2295 * 12B |                       Mask                                           |
2296 * 16B |                       Offset                                         |
2297 *     \----------------------------------------------------------------------/
2298 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2299 * Options - 0'b - Calculate & Update CRC for image
2300 */
/* Handle an NVM_CHANGE (0x4) command from a flash batch file.
 *
 * @cdev:       device being flashed
 * @data:       in/out cursor into the batch file; advanced past this command
 * @check_resp: unused by this command (left untouched)
 *
 * Either recomputes and writes the image CRC (when the CRC option bit is
 * set) or applies a list of Value/Mask/Offset register patches to the
 * image in NVM. Returns 0 on success, negative errno otherwise.
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	/* Skip the 4-byte command index; next byte is the image type */
	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	/* Locate the image of this type in the shadow NVM info */
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	/* Options byte: bit 0 selects CRC recalculation mode */
	is_crc = !!(**data & BIT(0));
	(*data)++;
	/* Number of Value/Mask/Offset patch entries that follow */
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		/* The CRC lives in the last 4 bytes of the image */
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		/* Read-modify-write: only the masked bits take the new value */
		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}
exit:
	return rc;
}
2393
2394/* Binary file format -
2395 *     /----------------------------------------------------------------------\
2396 * 0B  |                       0x3 [command index]                            |
2397 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2398 * 8B  | File-type |                   reserved                               |
2399 * 12B |                    Image length in bytes                             |
2400 *     \----------------------------------------------------------------------/
2401 *     Start a new file of the provided type
2402 */
2403static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2404					  const u8 **data, bool *check_resp)
2405{
2406	u32 file_type, file_size = 0;
2407	int rc;
2408
2409	*data += 4;
2410	*check_resp = !!(**data & BIT(0));
2411	*data += 4;
2412	file_type = **data;
2413
2414	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2415		   "About to start a new file of type %02x\n", file_type);
2416	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2417		*data += 4;
2418		file_size = *((u32 *)(*data));
2419	}
2420
2421	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2422			       (u8 *)(&file_size), 4);
2423	*data += 4;
2424
2425	return rc;
2426}
2427
2428/* Binary file format -
2429 *     /----------------------------------------------------------------------\
2430 * 0B  |                       0x2 [command index]                            |
2431 * 4B  |                       Length in bytes                                |
2432 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2433 * 12B |                       Offset in bytes                                |
2434 * 16B |                       Data ...                                       |
2435 *     \----------------------------------------------------------------------/
2436 *     Write data as part of a file that was previously started. Data should be
2437 *     of length equal to that provided in the message
2438 */
2439static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2440					 const u8 **data, bool *check_resp)
2441{
2442	u32 offset, len;
2443	int rc;
2444
2445	*data += 4;
2446	len = *((u32 *)(*data));
2447	*data += 4;
2448	*check_resp = !!(**data & BIT(0));
2449	*data += 4;
2450	offset = *((u32 *)(*data));
2451	*data += 4;
2452
2453	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2454		   "About to write File-data: %08x bytes to offset %08x\n",
2455		   len, offset);
2456
2457	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2458			       (char *)(*data), len);
2459	*data += len;
2460
2461	return rc;
2462}
2463
2464/* Binary file format [General header] -
2465 *     /----------------------------------------------------------------------\
2466 * 0B  |                       QED_NVM_SIGNATURE                              |
2467 * 4B  |                       Length in bytes                                |
2468 * 8B  | Highest command in this batchfile |          Reserved                |
2469 *     \----------------------------------------------------------------------/
2470 */
2471static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2472					const struct firmware *image,
2473					const u8 **data)
2474{
2475	u32 signature, len;
2476
2477	/* Check minimum size */
2478	if (image->size < 12) {
2479		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2480		return -EINVAL;
2481	}
2482
2483	/* Check signature */
2484	signature = *((u32 *)(*data));
2485	if (signature != QED_NVM_SIGNATURE) {
2486		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2487		return -EINVAL;
2488	}
2489
2490	*data += 4;
2491	/* Validate internal size equals the image-size */
2492	len = *((u32 *)(*data));
2493	if (len != image->size) {
2494		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2495		       len, (u32)image->size);
2496		return -EINVAL;
2497	}
2498
2499	*data += 4;
2500	/* Make sure driver familiar with all commands necessary for this */
2501	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2502		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2503		       *((u16 *)(*data)));
2504		return -EINVAL;
2505	}
2506
2507	*data += 4;
2508
2509	return 0;
2510}
2511
2512/* Binary file format -
2513 *     /----------------------------------------------------------------------\
2514 * 0B  |                       0x5 [command index]                            |
2515 * 4B  | Number of config attributes     |          Reserved                  |
2516 * 4B  | Config ID                       | Entity ID      | Length            |
2517 * 4B  | Value                                                                |
2518 *     |                                                                      |
2519 *     \----------------------------------------------------------------------/
2520 * There can be several cfg_id-entity_id-Length-Value sets as specified by
2521 * 'Number of config attributes'.
2522 *
2523 * The API parses config attributes from the user provided buffer and flashes
2524 * them to the respective NVM path using Management FW inerface.
2525 */
2526static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2527{
2528	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2529	u8 entity_id, len, buf[32];
2530	bool need_nvm_init = true;
2531	struct qed_ptt *ptt;
2532	u16 cfg_id, count;
2533	int rc = 0, i;
2534	u32 flags;
2535
2536	ptt = qed_ptt_acquire(hwfn);
2537	if (!ptt)
2538		return -EAGAIN;
2539
2540	/* NVM CFG ID attribute header */
2541	*data += 4;
2542	count = *((u16 *)*data);
2543	*data += 4;
2544
2545	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2546		   "Read config ids: num_attrs = %0d\n", count);
2547	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2548	 * arithmetic operations in the implementation.
2549	 */
2550	for (i = 1; i <= count; i++) {
2551		cfg_id = *((u16 *)*data);
2552		*data += 2;
2553		entity_id = **data;
2554		(*data)++;
2555		len = **data;
2556		(*data)++;
2557		memcpy(buf, *data, len);
2558		*data += len;
2559
2560		flags = 0;
2561		if (need_nvm_init) {
2562			flags |= QED_NVM_CFG_OPTION_INIT;
2563			need_nvm_init = false;
2564		}
2565
2566		/* Commit to flash and free the resources */
2567		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2568			flags |= QED_NVM_CFG_OPTION_COMMIT |
2569				 QED_NVM_CFG_OPTION_FREE;
2570			need_nvm_init = true;
2571		}
2572
2573		if (entity_id)
2574			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2575
2576		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2577			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
2578			   entity_id, len);
2579		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2580					 buf, len);
2581		if (rc) {
2582			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2583			break;
2584		}
2585	}
2586
2587	qed_ptt_release(hwfn, ptt);
2588
2589	return rc;
2590}
2591
2592#define QED_MAX_NVM_BUF_LEN	32
2593static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2594{
2595	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2596	u8 buf[QED_MAX_NVM_BUF_LEN];
2597	struct qed_ptt *ptt;
2598	u32 len;
2599	int rc;
2600
2601	ptt = qed_ptt_acquire(hwfn);
2602	if (!ptt)
2603		return QED_MAX_NVM_BUF_LEN;
2604
2605	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2606				 &len);
2607	if (rc || !len) {
2608		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2609		len = QED_MAX_NVM_BUF_LEN;
2610	}
2611
2612	qed_ptt_release(hwfn, ptt);
2613
2614	return len;
2615}
2616
2617static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2618				  u32 cmd, u32 entity_id)
2619{
2620	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2621	struct qed_ptt *ptt;
2622	u32 flags, len;
2623	int rc = 0;
2624
2625	ptt = qed_ptt_acquire(hwfn);
2626	if (!ptt)
2627		return -EAGAIN;
2628
2629	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2630		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
2631	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2632	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2633	if (rc)
2634		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2635
2636	qed_ptt_release(hwfn, ptt);
2637
2638	return rc;
2639}
2640
/* Flash a batch file (requested via request_firmware by @name) to NVM.
 *
 * Validates the general header, then dispatches each command in the
 * file to its handler; handlers advance the shared data cursor past the
 * command they consume. After any command that requests it, the MFW
 * response is fetched and checked. Returns 0 on success, negative errno
 * otherwise.
 */
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	/* Consumes the 12-byte general header and advances 'data' */
	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			/* Any response other than the "OK" family is fatal */
			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
2726
2727static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2728			     u8 *buf, u16 len)
2729{
2730	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2731
2732	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2733}
2734
2735void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2736{
2737	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2738	void *cookie = p_hwfn->cdev->ops_cookie;
2739
2740	if (ops && ops->schedule_recovery_handler)
2741		ops->schedule_recovery_handler(cookie);
2742}
2743
/* Human-readable names for enum qed_hw_err_type values; QED_HW_ERR_LAST
 * doubles as the catch-all "Unknown" entry for out-of-range types.
 */
static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL]		= "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL]	= "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN]		= "HW Attention",
	[QED_HW_ERR_DMAE_FAIL]		= "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL]	= "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT]		= "FW Assertion",
	[QED_HW_ERR_LAST]		= "Unknown",
};
2753
2754void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
2755			   enum qed_hw_err_type err_type)
2756{
2757	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2758	void *cookie = p_hwfn->cdev->ops_cookie;
2759	const char *err_str;
2760
2761	if (err_type > QED_HW_ERR_LAST)
2762		err_type = QED_HW_ERR_LAST;
2763	err_str = qed_hw_err_type_descr[err_type];
2764
2765	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
2766
2767	/* Call the HW error handler of the protocol driver.
2768	 * If it is not available - perform a minimal handling of preventing
2769	 * HW attentions from being reasserted.
2770	 */
2771	if (ops && ops->schedule_hw_err_handler)
2772		ops->schedule_hw_err_handler(cookie, err_type);
2773	else
2774		qed_int_attn_clr_enable(p_hwfn->cdev, true);
2775}
2776
2777static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2778			    void *handle)
2779{
2780		return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2781}
2782
2783static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2784{
2785	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2786	struct qed_ptt *ptt;
2787	int status = 0;
2788
2789	ptt = qed_ptt_acquire(hwfn);
2790	if (!ptt)
2791		return -EAGAIN;
2792
2793	status = qed_mcp_set_led(hwfn, ptt, mode);
2794
2795	qed_ptt_release(hwfn, ptt);
2796
2797	return status;
2798}
2799
2800int qed_recovery_process(struct qed_dev *cdev)
2801{
2802	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2803	struct qed_ptt *p_ptt;
2804	int rc = 0;
2805
2806	p_ptt = qed_ptt_acquire(p_hwfn);
2807	if (!p_ptt)
2808		return -EAGAIN;
2809
2810	rc = qed_start_recovery_process(p_hwfn, p_ptt);
2811
2812	qed_ptt_release(p_hwfn, p_ptt);
2813
2814	return rc;
2815}
2816
2817static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2818{
2819	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2820	struct qed_ptt *ptt;
2821	int rc = 0;
2822
2823	if (IS_VF(cdev))
2824		return 0;
2825
2826	ptt = qed_ptt_acquire(hwfn);
2827	if (!ptt)
2828		return -EAGAIN;
2829
2830	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2831				   : QED_OV_WOL_DISABLED);
2832	if (rc)
2833		goto out;
2834	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2835
2836out:
2837	qed_ptt_release(hwfn, ptt);
2838	return rc;
2839}
2840
2841static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2842{
2843	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2844	struct qed_ptt *ptt;
2845	int status = 0;
2846
2847	if (IS_VF(cdev))
2848		return 0;
2849
2850	ptt = qed_ptt_acquire(hwfn);
2851	if (!ptt)
2852		return -EAGAIN;
2853
2854	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2855						QED_OV_DRIVER_STATE_ACTIVE :
2856						QED_OV_DRIVER_STATE_DISABLED);
2857
2858	qed_ptt_release(hwfn, ptt);
2859
2860	return status;
2861}
2862
2863static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
2864{
2865	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2866	struct qed_ptt *ptt;
2867	int status = 0;
2868
2869	if (IS_VF(cdev))
2870		return 0;
2871
2872	ptt = qed_ptt_acquire(hwfn);
2873	if (!ptt)
2874		return -EAGAIN;
2875
2876	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2877	if (status)
2878		goto out;
2879
2880	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2881
2882out:
2883	qed_ptt_release(hwfn, ptt);
2884	return status;
2885}
2886
2887static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2888{
2889	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2890	struct qed_ptt *ptt;
2891	int status = 0;
2892
2893	if (IS_VF(cdev))
2894		return 0;
2895
2896	ptt = qed_ptt_acquire(hwfn);
2897	if (!ptt)
2898		return -EAGAIN;
2899
2900	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2901	if (status)
2902		goto out;
2903
2904	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2905
2906out:
2907	qed_ptt_release(hwfn, ptt);
2908	return status;
2909}
2910
2911static int
2912qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb,
2913		u16 qid, struct qed_sb_info_dbg *sb_dbg)
2914{
2915	struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
2916	struct qed_ptt *ptt;
2917	int rc;
2918
2919	if (IS_VF(cdev))
2920		return -EINVAL;
2921
2922	ptt = qed_ptt_acquire(hwfn);
2923	if (!ptt) {
2924		DP_NOTICE(hwfn, "Can't acquire PTT\n");
2925		return -EAGAIN;
2926	}
2927
2928	memset(sb_dbg, 0, sizeof(*sb_dbg));
2929	rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
2930
2931	qed_ptt_release(hwfn, ptt);
2932	return rc;
2933}
2934
2935static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2936				  u8 dev_addr, u32 offset, u32 len)
2937{
2938	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2939	struct qed_ptt *ptt;
2940	int rc = 0;
2941
2942	if (IS_VF(cdev))
2943		return 0;
2944
2945	ptt = qed_ptt_acquire(hwfn);
2946	if (!ptt)
2947		return -EAGAIN;
2948
2949	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2950				  offset, len, buf);
2951
2952	qed_ptt_release(hwfn, ptt);
2953
2954	return rc;
2955}
2956
2957static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
2958{
2959	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2960	struct qed_ptt *ptt;
2961	int rc = 0;
2962
2963	if (IS_VF(cdev))
2964		return 0;
2965
2966	ptt = qed_ptt_acquire(hwfn);
2967	if (!ptt)
2968		return -EAGAIN;
2969
2970	rc = qed_dbg_grc_config(hwfn, cfg_id, val);
2971
2972	qed_ptt_release(hwfn, ptt);
2973
2974	return rc;
2975}
2976
2977static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...)
2978{
2979	char buf[QED_MFW_REPORT_STR_SIZE];
2980	struct qed_hwfn *p_hwfn;
2981	struct qed_ptt *p_ptt;
2982	va_list vl;
2983
2984	va_start(vl, fmt);
2985	vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
2986	va_end(vl);
2987
2988	if (IS_PF(cdev)) {
2989		p_hwfn = QED_LEADING_HWFN(cdev);
2990		p_ptt = qed_ptt_acquire(p_hwfn);
2991		if (p_ptt) {
2992			qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf));
2993			qed_ptt_release(p_hwfn, p_ptt);
2994		}
2995	}
2996}
2997
/* Return the index of the HW function currently holding the affinity
 * (per the QED_AFFIN_HWFN_IDX macro).
 */
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}
3002
3003static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active)
3004{
3005	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
3006	struct qed_ptt *ptt;
3007	int rc = 0;
3008
3009	*esl_active = false;
3010
3011	if (IS_VF(cdev))
3012		return 0;
3013
3014	ptt = qed_ptt_acquire(hwfn);
3015	if (!ptt)
3016		return -EAGAIN;
3017
3018	rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);
3019
3020	qed_ptt_release(hwfn, ptt);
3021
3022	return rc;
3023}
3024
/* Selftest callbacks exported to protocol drivers through
 * qed_common_ops_pass.selftest.
 */
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
3032
/* Common core-module operations table handed to the protocol drivers
 * (qede/qedr/qedf/qedi) via qed_if.h.
 */
const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.devlink_register = qed_devlink_register,
	.devlink_unregister = qed_devlink_unregister,
	.report_fatal_error = qed_report_fatal_error,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
	.mfw_report = &qed_mfw_report,
	.get_sb_info = &qed_get_sb_info,
	.get_esl_status = &qed_get_esl_status,
};
3084
3085void qed_get_protocol_stats(struct qed_dev *cdev,
3086			    enum qed_mcp_protocol_type type,
3087			    union qed_mcp_protocol_stats *stats)
3088{
3089	struct qed_eth_stats eth_stats;
3090
3091	memset(stats, 0, sizeof(*stats));
3092
3093	switch (type) {
3094	case QED_MCP_LAN_STATS:
3095		qed_get_vport_stats_context(cdev, &eth_stats, true);
3096		stats->lan_stats.ucast_rx_pkts =
3097					eth_stats.common.rx_ucast_pkts;
3098		stats->lan_stats.ucast_tx_pkts =
3099					eth_stats.common.tx_ucast_pkts;
3100		stats->lan_stats.fcs_err = -1;
3101		break;
3102	case QED_MCP_FCOE_STATS:
3103		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
3104		break;
3105	case QED_MCP_ISCSI_STATS:
3106		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
3107		break;
3108	default:
3109		DP_VERBOSE(cdev, QED_MSG_SP,
3110			   "Invalid protocol type = %d\n", type);
3111		return;
3112	}
3113}
3114
/* Request the slowpath task to collect and send MFW TLV data.
 *
 * Sets the TLV-request flag atomically (with barriers on both sides of
 * the bit operation) and queues the slowpath work item for immediate
 * execution. Always returns 0.
 */
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
3129
3130static void
3131qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
3132{
3133	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
3134	struct qed_eth_stats_common *p_common;
3135	struct qed_generic_tlvs gen_tlvs;
3136	struct qed_eth_stats stats;
3137	int i;
3138
3139	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
3140	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
3141
3142	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
3143		tlv->flags.ipv4_csum_offload = true;
3144	if (gen_tlvs.feat_flags & QED_TLV_LSO)
3145		tlv->flags.lso_supported = true;
3146	tlv->flags.b_set = true;
3147
3148	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
3149		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
3150			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
3151			tlv->mac_set[i] = true;
3152		}
3153	}
3154
3155	qed_get_vport_stats(cdev, &stats);
3156	p_common = &stats.common;
3157	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
3158			 p_common->rx_bcast_pkts;
3159	tlv->rx_frames_set = true;
3160	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
3161			p_common->rx_bcast_bytes;
3162	tlv->rx_bytes_set = true;
3163	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
3164			 p_common->tx_bcast_pkts;
3165	tlv->tx_frames_set = true;
3166	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
3167			p_common->tx_bcast_bytes;
3168	tlv->rx_bytes_set = true;
3169}
3170
3171int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
3172			  union qed_mfw_tlv_data *tlv_buf)
3173{
3174	struct qed_dev *cdev = hwfn->cdev;
3175	struct qed_common_cb_ops *ops;
3176
3177	ops = cdev->protocol_ops.common;
3178	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
3179		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
3180		return -EINVAL;
3181	}
3182
3183	switch (type) {
3184	case QED_MFW_TLV_GENERIC:
3185		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
3186		break;
3187	case QED_MFW_TLV_ETH:
3188		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
3189		break;
3190	case QED_MFW_TLV_FCOE:
3191		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
3192		break;
3193	case QED_MFW_TLV_ISCSI:
3194		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
3195		break;
3196	default:
3197		break;
3198	}
3199
3200	return 0;
3201}
3202
3203unsigned long qed_get_epoch_time(void)
3204{
3205	return ktime_get_real_seconds();
3206}
v5.4
 
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2017  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and /or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/stddef.h>
  34#include <linux/pci.h>
  35#include <linux/kernel.h>
  36#include <linux/slab.h>
  37#include <linux/delay.h>
  38#include <asm/byteorder.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/string.h>
  41#include <linux/module.h>
  42#include <linux/interrupt.h>
  43#include <linux/workqueue.h>
  44#include <linux/ethtool.h>
  45#include <linux/etherdevice.h>
  46#include <linux/vmalloc.h>
  47#include <linux/crash_dump.h>
  48#include <linux/crc32.h>
  49#include <linux/qed/qed_if.h>
  50#include <linux/qed/qed_ll2_if.h>
  51#include <net/devlink.h>
 
  52
  53#include "qed.h"
  54#include "qed_sriov.h"
  55#include "qed_sp.h"
  56#include "qed_dev_api.h"
  57#include "qed_ll2.h"
  58#include "qed_fcoe.h"
  59#include "qed_iscsi.h"
  60
  61#include "qed_mcp.h"
  62#include "qed_reg_addr.h"
  63#include "qed_hw.h"
  64#include "qed_selftest.h"
  65#include "qed_debug.h"
 
  66
  67#define QED_ROCE_QPS			(8192)
  68#define QED_ROCE_DPIS			(8)
  69#define QED_RDMA_SRQS                   QED_ROCE_QPS
  70#define QED_NVM_CFG_GET_FLAGS		0xA
  71#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
  72#define QED_NVM_CFG_MAX_ATTRS		50
  73
  74static char version[] =
  75	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
  76
  77MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
  78MODULE_LICENSE("GPL");
  79MODULE_VERSION(DRV_MODULE_VERSION);
  80
  81#define FW_FILE_VERSION				\
  82	__stringify(FW_MAJOR_VERSION) "."	\
  83	__stringify(FW_MINOR_VERSION) "."	\
  84	__stringify(FW_REVISION_VERSION) "."	\
  85	__stringify(FW_ENGINEERING_VERSION)
  86
  87#define QED_FW_FILE_NAME	\
  88	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
  89
  90MODULE_FIRMWARE(QED_FW_FILE_NAME);
  91
/* Module entry point - only announces the driver version; all real
 * initialization happens per-device at probe time.
 */
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}
  98
/* Module exit point - per-device teardown happens via qed_remove(). */
static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}
 103
 104module_init(qed_init);
 105module_exit(qed_cleanup);
 106
 107/* Check if the DMA controller on the machine can properly handle the DMA
 108 * addressing required by the device.
 109*/
 110static int qed_set_coherency_mask(struct qed_dev *cdev)
 111{
 112	struct device *dev = &cdev->pdev->dev;
 113
 114	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
 115		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
 116			DP_NOTICE(cdev,
 117				  "Can't request 64-bit consistent allocations\n");
 118			return -EIO;
 119		}
 120	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
 121		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
 122		return -EIO;
 123	}
 124
 125	return 0;
 126}
 
 
 
 
 
 
 
 127
 128static void qed_free_pci(struct qed_dev *cdev)
 129{
 130	struct pci_dev *pdev = cdev->pdev;
 131
 132	if (cdev->doorbells && cdev->db_size)
 133		iounmap(cdev->doorbells);
 134	if (cdev->regview)
 135		iounmap(cdev->regview);
 136	if (atomic_read(&pdev->enable_cnt) == 1)
 137		pci_release_regions(pdev);
 138
 139	pci_disable_device(pdev);
 140}
 141
 142#define PCI_REVISION_ID_ERROR_VAL	0xff
 143
 144/* Performs PCI initializations as well as initializing PCI-related parameters
 145 * in the device structrue. Returns 0 in case of success.
 146 */
 147static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
 148{
 149	u8 rev_id;
 150	int rc;
 151
 152	cdev->pdev = pdev;
 153
 154	rc = pci_enable_device(pdev);
 155	if (rc) {
 156		DP_NOTICE(cdev, "Cannot enable PCI device\n");
 157		goto err0;
 158	}
 159
 160	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 161		DP_NOTICE(cdev, "No memory region found in bar #0\n");
 162		rc = -EIO;
 163		goto err1;
 164	}
 165
 166	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 167		DP_NOTICE(cdev, "No memory region found in bar #2\n");
 168		rc = -EIO;
 169		goto err1;
 170	}
 171
 172	if (atomic_read(&pdev->enable_cnt) == 1) {
 173		rc = pci_request_regions(pdev, "qed");
 174		if (rc) {
 175			DP_NOTICE(cdev,
 176				  "Failed to request PCI memory resources\n");
 177			goto err1;
 178		}
 179		pci_set_master(pdev);
 180		pci_save_state(pdev);
 181	}
 182
 183	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
 184	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
 185		DP_NOTICE(cdev,
 186			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
 187			  rev_id);
 188		rc = -ENODEV;
 189		goto err2;
 190	}
 191	if (!pci_is_pcie(pdev)) {
 192		DP_NOTICE(cdev, "The bus is not PCI Express\n");
 193		rc = -EIO;
 194		goto err2;
 195	}
 196
 197	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 198	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
 199		DP_NOTICE(cdev, "Cannot find power management capability\n");
 200
 201	rc = qed_set_coherency_mask(cdev);
 202	if (rc)
 
 
 203		goto err2;
 
 204
 205	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
 206	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
 207	cdev->pci_params.irq = pdev->irq;
 208
 209	cdev->regview = pci_ioremap_bar(pdev, 0);
 210	if (!cdev->regview) {
 211		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
 212		rc = -ENOMEM;
 213		goto err2;
 214	}
 215
 216	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
 217	cdev->db_size = pci_resource_len(cdev->pdev, 2);
 218	if (!cdev->db_size) {
 219		if (IS_PF(cdev)) {
 220			DP_NOTICE(cdev, "No Doorbell bar available\n");
 221			return -EINVAL;
 222		} else {
 223			return 0;
 224		}
 225	}
 226
 227	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
 228
 229	if (!cdev->doorbells) {
 230		DP_NOTICE(cdev, "Cannot map doorbell space\n");
 231		return -ENOMEM;
 232	}
 233
 234	return 0;
 235
 236err2:
 237	pci_release_regions(pdev);
 238err1:
 239	pci_disable_device(pdev);
 240err0:
 241	return rc;
 242}
 243
/* Fill @dev_info with the device's capabilities and identity: tunnelling
 * support, BAR/IRQ parameters, FW/MFW/MBI versions, flash size and MTU.
 * Always returns 0.
 */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt  *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	/* A tunnel type is reported as enabled only when its mode is active
	 * and it is classified by MAC+VLAN.
	 */
	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		/* A PF knows its FW version at compile time */
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		/* A VF queries the FW version from its PF */
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		/* MFW/MBI/flash queries need a PTT window on the PF;
		 * if none is available those fields stay zeroed.
		 */
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}
 320
 321static void qed_free_cdev(struct qed_dev *cdev)
 322{
 323	kfree((void *)cdev);
 324}
 325
 326static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
 327{
 328	struct qed_dev *cdev;
 329
 330	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
 331	if (!cdev)
 332		return cdev;
 333
 334	qed_init_struct(cdev);
 335
 336	return cdev;
 337}
 338
 339/* Sets the requested power state */
 340static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
 341{
 342	if (!cdev)
 343		return -ENODEV;
 344
 345	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
 346	return 0;
 347}
 348
/* Private data attached to the devlink instance. */
struct qed_devlink {
	struct qed_dev *cdev;	/* back-pointer to the owning device */
};

/* Driver-specific devlink parameter IDs; must start past the generic range. */
enum qed_devlink_param_id {
	QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	QED_DEVLINK_PARAM_ID_IWARP_CMT,
};
 357
 358static int qed_dl_param_get(struct devlink *dl, u32 id,
 359			    struct devlink_param_gset_ctx *ctx)
 360{
 361	struct qed_devlink *qed_dl;
 362	struct qed_dev *cdev;
 363
 364	qed_dl = devlink_priv(dl);
 365	cdev = qed_dl->cdev;
 366	ctx->val.vbool = cdev->iwarp_cmt;
 367
 368	return 0;
 369}
 370
 371static int qed_dl_param_set(struct devlink *dl, u32 id,
 372			    struct devlink_param_gset_ctx *ctx)
 373{
 374	struct qed_devlink *qed_dl;
 375	struct qed_dev *cdev;
 376
 377	qed_dl = devlink_priv(dl);
 378	cdev = qed_dl->cdev;
 379	cdev->iwarp_cmt = ctx->val.vbool;
 380
 381	return 0;
 382}
 383
/* Table of driver-specific devlink parameters exposed to userspace. */
static const struct devlink_param qed_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
			     "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     qed_dl_param_get, qed_dl_param_set, NULL),
};

/* No devlink ops are implemented; parameters are the only interface. */
static const struct devlink_ops qed_dl_ops;
 392
/* Allocate and register a devlink instance for @cdev and expose the
 * driver's devlink parameters (currently only "iwarp_cmt").
 * Returns 0 on success or a negative errno after undoing partial setup.
 */
static int qed_devlink_register(struct qed_dev *cdev)
{
	union devlink_param_value value;
	struct qed_devlink *qed_dl;
	struct devlink *dl;
	int rc;

	dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
	if (!dl)
		return -ENOMEM;

	qed_dl = devlink_priv(dl);

	/* Link the devlink instance and the device both ways */
	cdev->dl = dl;
	qed_dl->cdev = cdev;

	rc = devlink_register(dl, &cdev->pdev->dev);
	if (rc)
		goto err_free;

	rc = devlink_params_register(dl, qed_devlink_params,
				     ARRAY_SIZE(qed_devlink_params));
	if (rc)
		goto err_unregister;

	/* Default: iwarp_cmt is off */
	value.vbool = false;
	devlink_param_driverinit_value_set(dl,
					   QED_DEVLINK_PARAM_ID_IWARP_CMT,
					   value);

	devlink_params_publish(dl);
	cdev->iwarp_cmt = false;

	return 0;

err_unregister:
	devlink_unregister(dl);

err_free:
	cdev->dl = NULL;
	devlink_free(dl);

	return rc;
}
 437
 438static void qed_devlink_unregister(struct qed_dev *cdev)
 439{
 440	if (!cdev->dl)
 441		return;
 442
 443	devlink_params_unregister(cdev->dl, qed_devlink_params,
 444				  ARRAY_SIZE(qed_devlink_params));
 445
 446	devlink_unregister(cdev->dl);
 447	devlink_free(cdev->dl);
 448}
 449
 450/* probing */
 451static struct qed_dev *qed_probe(struct pci_dev *pdev,
 452				 struct qed_probe_params *params)
 453{
 454	struct qed_dev *cdev;
 455	int rc;
 456
 457	cdev = qed_alloc_cdev(pdev);
 458	if (!cdev)
 459		goto err0;
 460
 461	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
 462	cdev->protocol = params->protocol;
 463
 464	if (params->is_vf)
 465		cdev->b_is_vf = true;
 466
 467	qed_init_dp(cdev, params->dp_module, params->dp_level);
 468
 469	cdev->recov_in_prog = params->recov_in_prog;
 470
 471	rc = qed_init_pci(cdev, pdev);
 472	if (rc) {
 473		DP_ERR(cdev, "init pci failed\n");
 474		goto err1;
 475	}
 476	DP_INFO(cdev, "PCI init completed successfully\n");
 477
 478	rc = qed_devlink_register(cdev);
 479	if (rc) {
 480		DP_INFO(cdev, "Failed to register devlink.\n");
 481		goto err2;
 482	}
 483
 484	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
 485	if (rc) {
 486		DP_ERR(cdev, "hw prepare failed\n");
 487		goto err2;
 488	}
 489
 490	DP_INFO(cdev, "qed_probe completed successfully\n");
 491
 492	return cdev;
 493
 494err2:
 495	qed_free_pci(cdev);
 496err1:
 497	qed_free_cdev(cdev);
 498err0:
 499	return NULL;
 500}
 501
/* Tear down a device created by qed_probe(): remove HW state, unmap PCI
 * resources, drop the devlink registration and free the qed_dev itself.
 * A NULL @cdev is tolerated.
 */
static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_devlink_unregister(cdev);

	qed_free_cdev(cdev);
}
 517
 518static void qed_disable_msix(struct qed_dev *cdev)
 519{
 520	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 521		pci_disable_msix(cdev->pdev);
 522		kfree(cdev->int_params.msix_table);
 523	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
 524		pci_disable_msi(cdev->pdev);
 525	}
 526
 527	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
 528}
 529
/* Enable MSI-X with the vector count requested in @int_params.
 *
 * If fewer vectors than requested (but at least the minimum) are granted
 * and that count is not a multiple of the hwfn count, retry with the count
 * rounded down to a hwfn multiple so each hwfn gets an equal share.
 * On success stores mode/count in int_params->out and returns 0; otherwise
 * returns pci_enable_msix_*'s negative status.
 */
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
 573
/* This function outputs the int mode and the number of enabled msix vector */
/* Configure the interrupt mode requested in int_params->in, degrading
 * MSI-X -> MSI -> INTa unless @force_mode forbids falling back.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		/* MSI is only attempted on single-hwfn devices */
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
			int_params->out.int_mode = QED_INT_MODE_INTA;
			rc = 0;
			goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
 636
 637static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
 638				    int index, void(*handler)(void *))
 639{
 640	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
 641	int relative_idx = index / cdev->num_hwfns;
 642
 643	hwfn->simd_proto_handler[relative_idx].func = handler;
 644	hwfn->simd_proto_handler[relative_idx].token = token;
 645}
 646
 647static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
 648{
 649	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
 650	int relative_idx = index / cdev->num_hwfns;
 651
 652	memset(&hwfn->simd_proto_handler[relative_idx], 0,
 653	       sizeof(struct qed_simd_fp_handler));
 654}
 655
 656static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
 657{
 658	tasklet_schedule((struct tasklet_struct *)tasklet);
 659	return IRQ_HANDLED;
 660}
 661
/* INTa/MSI interrupt handler: one interrupt line serves all hwfns.
 *
 * For each hwfn the IGU "simd" status register is read; bit 0 is the
 * slowpath interrupt (handled by scheduling the sp DPC tasklet), while
 * bits 1..64 are fastpath bits dispatched to the registered simd handlers.
 */
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			/* Fastpath bit j lives at position j+1 of the status */
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				/* Acknowledge this bit as handled */
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
 711
/* Request the slowpath IRQ for @hwfn.
 *
 * In MSI-X mode each hwfn owns a vector, named after its PCI address; the
 * hard IRQ just schedules the hwfn's sp DPC tasklet.  In MSI/INTa mode a
 * single handler (qed_single_int) serves the whole device, shared in INTa.
 * Returns request_irq()'s status.
 */
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		/* Legacy INTa lines may be shared with other devices */
		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
 750
/* Wait out any in-flight execution of the hwfn's slowpath DPC tasklet. */
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}
 762
 763void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
 764{
 765	struct qed_dev *cdev = p_hwfn->cdev;
 766	u8 id = p_hwfn->my_id;
 767	u32 int_mode;
 768
 769	int_mode = cdev->int_params.out.int_mode;
 770	if (int_mode == QED_INT_MODE_MSIX)
 771		synchronize_irq(cdev->int_params.msix_table[id].vector);
 772	else
 773		synchronize_irq(cdev->pdev->irq);
 774
 775	qed_slowpath_tasklet_flush(p_hwfn);
 776}
 777
/* Release all slowpath IRQs requested via qed_slowpath_irq_req(), then
 * let the interrupt code do its post-ISR-release cleanup.
 */
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			/* hwfns request their vectors in order, so the first
			 * one that never requested ends the scan.
			 */
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}
 796
 797static int qed_nic_stop(struct qed_dev *cdev)
 798{
 799	int i, rc;
 800
 801	rc = qed_hw_stop(cdev);
 802
 803	for (i = 0; i < cdev->num_hwfns; i++) {
 804		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 805
 806		if (p_hwfn->b_sp_dpc_enabled) {
 807			tasklet_disable(p_hwfn->sp_dpc);
 808			p_hwfn->b_sp_dpc_enabled = false;
 809			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
 810				   "Disabled sp tasklet [hwfn %d] at %p\n",
 811				   i, p_hwfn->sp_dpc);
 812		}
 813	}
 814
 815	qed_dbg_pf_exit(cdev);
 816
 817	return rc;
 818}
 819
 820static int qed_nic_setup(struct qed_dev *cdev)
 821{
 822	int rc, i;
 823
 824	/* Determine if interface is going to require LL2 */
 825	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
 826		for (i = 0; i < cdev->num_hwfns; i++) {
 827			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 828
 829			p_hwfn->using_ll2 = true;
 830		}
 831	}
 832
 833	rc = qed_resc_alloc(cdev);
 834	if (rc)
 835		return rc;
 836
 837	DP_INFO(cdev, "Allocated qed resources\n");
 838
 839	qed_resc_setup(cdev);
 840
 841	return rc;
 842}
 843
 844static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
 845{
 846	int limit = 0;
 847
 848	/* Mark the fastpath as free/used */
 849	cdev->int_params.fp_initialized = cnt ? true : false;
 850
 851	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
 852		limit = cdev->num_hwfns * 63;
 853	else if (cdev->int_params.fp_msix_cnt)
 854		limit = cdev->int_params.fp_msix_cnt;
 855
 856	if (!limit)
 857		return -ENOMEM;
 858
 859	return min_t(int, cnt, limit);
 860}
 861
 862static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
 863{
 864	memset(info, 0, sizeof(struct qed_int_info));
 865
 866	if (!cdev->int_params.fp_initialized) {
 867		DP_INFO(cdev,
 868			"Protocol driver requested interrupt information, but its support is not yet configured\n");
 869		return -EINVAL;
 870	}
 871
 872	/* Need to expose only MSI-X information; Single IRQ is handled solely
 873	 * by qed.
 874	 */
 875	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 876		int msix_base = cdev->int_params.fp_msix_base;
 877
 878		info->msix_cnt = cdev->int_params.fp_msix_cnt;
 879		info->msix = &cdev->int_params.msix_table[msix_base];
 880	}
 881
 882	return 0;
 883}
 884
/* Configure interrupts for a PF: count the needed vectors (one per status
 * block plus one slowpath vector per hwfn), enable the requested mode, and
 * partition the resulting vectors between L2 fastpath and RDMA.
 * Returns 0 on success or a negative errno.
 */
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc)  {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	/* The first num_hwfns vectors are the slowpath ones */
	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	/* Vectors beyond the L2 queue needs go to RDMA, split per hwfn */
	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
 956
/* Configure interrupts for a VF: always MSI-X, with one fastpath vector
 * per rx queue (summed over both hwfns on CMT devices).
 * Returns 0 on success or a negative errno.
 */
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	/* VFs have no slowpath vectors; all of them are fastpath */
	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
 985
/* Decompress @input_len bytes from @input_buf into @unzip_buf (capacity
 * @max_size bytes) using the hwfn's preallocated zlib stream.
 * Returns the number of 32-bit dwords produced, or 0 on any zlib error.
 */
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	/* Z_FINISH: the whole input is available, inflate in one shot */
	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
1015
1016static int qed_alloc_stream_mem(struct qed_dev *cdev)
1017{
1018	int i;
1019	void *workspace;
1020
1021	for_each_hwfn(cdev, i) {
1022		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1023
1024		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
1025		if (!p_hwfn->stream)
1026			return -ENOMEM;
1027
1028		workspace = vzalloc(zlib_inflate_workspacesize());
1029		if (!workspace)
1030			return -ENOMEM;
1031		p_hwfn->stream->workspace = workspace;
1032	}
1033
1034	return 0;
1035}
1036
/* Free the zlib streams allocated by qed_alloc_stream_mem().
 *
 * NOTE(review): the early 'return' (rather than 'continue') relies on
 * streams being allocated in hwfn order, so the first hwfn without a
 * stream implies none of the following hwfns have one either.
 */
static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
1051
/* Apply driver defaults to @params and copy the result into every hwfn:
 * RDMA builds get default QP/DPI/SRQ counts, aRFS is disabled for CMT
 * devices and VFs, and the L2 connection count is capped when RDMA may
 * be in use.
 */
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		/* NOTE(review): the comment above does not match the
		 * assignment below - likely stale; verify against history.
		 */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}
1085
1086#define QED_PERIODIC_DB_REC_COUNT		10
1087#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
1088#define QED_PERIODIC_DB_REC_INTERVAL \
1089	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
1090#define QED_PERIODIC_DB_REC_WAIT_COUNT		10
1091#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
1092	(QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
1093
/* Mark @wq_flag and queue the hwfn's slowpath task to run after @delay
 * jiffies. Returns -EINVAL when the workqueue has been deactivated
 * (tear-down in progress), 0 otherwise.
 */
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);

	/* Ensure the flag is visible before the work is queued */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}
1109
1110void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
1111{
1112	/* Reset periodic Doorbell Recovery counter */
1113	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
1114
1115	/* Don't schedule periodic Doorbell Recovery if already scheduled */
1116	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
1117		     &p_hwfn->slowpath_task_flags))
1118		return;
1119
1120	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
1121				  QED_PERIODIC_DB_REC_INTERVAL);
1122}
1123
/* Stop and destroy every hwfn's slowpath workqueue. New work is
 * blocked first, then any pending periodic doorbell recovery is given
 * a bounded wait before the workqueue is flushed and destroyed.
 */
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	/* NOTE(review): sleep_count is shared across hwfns, so the total
	 * wait budget is split between them rather than reset per hwfn -
	 * confirm this is intentional.
	 */
	int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		/* Wait until the last periodic doorbell recovery is executed */
		while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
				&cdev->hwfns[i].slowpath_task_flags) &&
		       sleep_count--)
			msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}
1148
/* Slowpath deferred worker: services MFW TLV requests and periodic
 * doorbell recovery. Needs a PTT; if none is available the work is
 * re-queued immediately (unless the workqueue is being torn down).
 */
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		/* Retry later when a PTT may be free again */
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		/* Re-arm while the countdown set by
		 * qed_periodic_db_rec_start() has not expired.
		 */
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}
1178
1179static int qed_slowpath_wq_start(struct qed_dev *cdev)
1180{
1181	struct qed_hwfn *hwfn;
1182	char name[NAME_SIZE];
1183	int i;
1184
1185	if (IS_VF(cdev))
1186		return 0;
1187
1188	for_each_hwfn(cdev, i) {
1189		hwfn = &cdev->hwfns[i];
1190
1191		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
1192			 cdev->pdev->bus->number,
1193			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
1194
1195		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
1196		if (!hwfn->slowpath_wq) {
1197			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
1198			return -ENOMEM;
1199		}
1200
1201		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
1202		hwfn->slowpath_wq_active = true;
1203	}
1204
1205	return 0;
1206}
1207
/* Bring up the device slowpath: IOV/slowpath workqueues, firmware
 * request, interrupt setup, stream allocation, HW init with default
 * tunnel configuration, optional LL2 allocation and the MFW driver
 * version report. On any failure the already-initialized resources are
 * unwound in reverse order through the err* labels.
 */
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		/* In single-engine mode, reserve a dedicated PTT for aRFS */
		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");

				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	/* All tunnel modes default to enabled with MAC/VLAN classification */
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		/* Report the driver version to the MFW */
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	/* NOTE(review): reached even when request_firmware() failed or
	 * never ran - assumes release_firmware() tolerates that; confirm.
	 */
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
1366
/* Tear down the slowpath in reverse order of qed_slowpath_start():
 * workqueues, LL2, aRFS PTT/stream memory/SR-IOV (PF only), NIC stop,
 * IRQs, MSI-X, resources, IOV workqueue and finally the firmware.
 */
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		/* Release the aRFS PTT reserved during slowpath start
		 * (single-engine configurations only).
		 */
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}
1401
1402static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
1403{
1404	int i;
1405
1406	memcpy(cdev->name, name, NAME_SIZE);
1407	for_each_hwfn(cdev, i)
1408		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1409}
1410
1411static u32 qed_sb_init(struct qed_dev *cdev,
1412		       struct qed_sb_info *sb_info,
1413		       void *sb_virt_addr,
1414		       dma_addr_t sb_phy_addr, u16 sb_id,
1415		       enum qed_sb_type type)
1416{
1417	struct qed_hwfn *p_hwfn;
1418	struct qed_ptt *p_ptt;
1419	u16 rel_sb_id;
1420	u32 rc;
1421
1422	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1423	if (type == QED_SB_TYPE_L2_QUEUE) {
1424		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1425		rel_sb_id = sb_id / cdev->num_hwfns;
1426	} else {
1427		p_hwfn = QED_AFFIN_HWFN(cdev);
1428		rel_sb_id = sb_id;
1429	}
1430
1431	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1432		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1433		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1434
1435	if (IS_PF(p_hwfn->cdev)) {
1436		p_ptt = qed_ptt_acquire(p_hwfn);
1437		if (!p_ptt)
1438			return -EBUSY;
1439
1440		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
1441				     sb_phy_addr, rel_sb_id);
1442		qed_ptt_release(p_hwfn, p_ptt);
1443	} else {
1444		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
1445				     sb_phy_addr, rel_sb_id);
1446	}
1447
1448	return rc;
1449}
1450
1451static u32 qed_sb_release(struct qed_dev *cdev,
1452			  struct qed_sb_info *sb_info,
1453			  u16 sb_id,
1454			  enum qed_sb_type type)
1455{
1456	struct qed_hwfn *p_hwfn;
1457	u16 rel_sb_id;
1458	u32 rc;
1459
1460	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1461	if (type == QED_SB_TYPE_L2_QUEUE) {
1462		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
1463		rel_sb_id = sb_id / cdev->num_hwfns;
1464	} else {
1465		p_hwfn = QED_AFFIN_HWFN(cdev);
1466		rel_sb_id = sb_id;
1467	}
1468
1469	DP_VERBOSE(cdev, NETIF_MSG_INTR,
1470		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1471		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
1472
1473	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1474
1475	return rc;
1476}
1477
/* Link parameters are always changeable for this driver; the argument
 * is unused.
 */
static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
1482
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Translate driver-API link parameters into MFW link configuration and
 * apply them. Only the fields flagged in params->override_flags are
 * modified; the resulting configuration is sent via qed_mcp_set_link().
 * Returns 0 on success or a negative errno.
 */
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	u32 sup_caps;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		/* Rebuild the advertised-speed mask: each group of
		 * ethtool link modes maps onto one NVM capability bit.
		 */
		link_params->speed.advertised_speeds = 0;
		sup_caps = QED_LM_1000baseT_Full_BIT |
			   QED_LM_1000baseKX_Full_BIT |
			   QED_LM_1000baseX_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		sup_caps = QED_LM_10000baseT_Full_BIT |
			   QED_LM_10000baseKR_Full_BIT |
			   QED_LM_10000baseKX4_Full_BIT |
			   QED_LM_10000baseR_FEC_BIT |
			   QED_LM_10000baseCR_Full_BIT |
			   QED_LM_10000baseSR_Full_BIT |
			   QED_LM_10000baseLR_Full_BIT |
			   QED_LM_10000baseLRM_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
		sup_caps = QED_LM_25000baseKR_Full_BIT |
			   QED_LM_25000baseCR_Full_BIT |
			   QED_LM_25000baseSR_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		sup_caps = QED_LM_40000baseLR4_Full_BIT |
			   QED_LM_40000baseKR4_Full_BIT |
			   QED_LM_40000baseCR4_Full_BIT |
			   QED_LM_40000baseSR4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		sup_caps = QED_LM_50000baseKR2_Full_BIT |
			   QED_LM_50000baseCR2_Full_BIT |
			   QED_LM_50000baseSR2_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		sup_caps = QED_LM_100000baseKR4_Full_BIT |
			   QED_LM_100000baseSR4_Full_BIT |
			   QED_LM_100000baseCR4_Full_BIT |
			   QED_LM_100000baseLR4_ER4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
1608
1609static int qed_get_port_type(u32 media_type)
1610{
1611	int port_type;
1612
1613	switch (media_type) {
1614	case MEDIA_SFPP_10G_FIBER:
1615	case MEDIA_SFP_1G_FIBER:
1616	case MEDIA_XFP_FIBER:
1617	case MEDIA_MODULE_FIBER:
1618	case MEDIA_KR:
1619		port_type = PORT_FIBRE;
1620		break;
1621	case MEDIA_DA_TWINAX:
1622		port_type = PORT_DA;
1623		break;
1624	case MEDIA_BASE_T:
1625		port_type = PORT_TP;
1626		break;
 
1627	case MEDIA_NOT_PRESENT:
1628		port_type = PORT_NONE;
1629		break;
1630	case MEDIA_UNSPECIFIED:
1631	default:
1632		port_type = PORT_OTHER;
1633		break;
1634	}
1635	return port_type;
1636}
1637
1638static int qed_get_link_data(struct qed_hwfn *hwfn,
1639			     struct qed_mcp_link_params *params,
1640			     struct qed_mcp_link_state *link,
1641			     struct qed_mcp_link_capabilities *link_caps)
1642{
1643	void *p;
1644
1645	if (!IS_PF(hwfn->cdev)) {
1646		qed_vf_get_link_params(hwfn, params);
1647		qed_vf_get_link_state(hwfn, link);
1648		qed_vf_get_link_caps(hwfn, link_caps);
1649
1650		return 0;
1651	}
1652
1653	p = qed_mcp_get_link_params(hwfn);
1654	if (!p)
1655		return -ENXIO;
1656	memcpy(params, p, sizeof(*params));
1657
1658	p = qed_mcp_get_link_state(hwfn);
1659	if (!p)
1660		return -ENXIO;
1661	memcpy(link, p, sizeof(*link));
1662
1663	p = qed_mcp_get_link_capabilities(hwfn);
1664	if (!p)
1665		return -ENXIO;
1666	memcpy(link_caps, p, sizeof(*link_caps));
1667
1668	return 0;
1669}
1670
/* Translate the NVM speed-capability bits in @capability into ethtool
 * link-mode bits, ORed into *@if_capability. The media type, plugged
 * transceiver and board configuration narrow which link modes are
 * reported; MCP query failures fall back to permissive defaults.
 */
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     u32 *if_capability)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	/* NOTE(review): on failure tcvr_type is set to a transceiver
	 * *state* constant (UNPLUGGED), not a type - confirm intended.
	 */
	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		*if_capability |= QED_LM_FIBRE_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		/* For DAC media multiple speed capabilities are supported*/
		capability = capability & speed_mask;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseCR4_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseCR2_Full_BIT;
		if (capability &
			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseCR4_Full_BIT;
		break;
	case MEDIA_BASE_T:
		*if_capability |= QED_LM_TP_BIT;
		/* External PHY: report the base-T modes directly */
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			}
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
				*if_capability |= QED_LM_10000baseT_Full_BIT;
			}
		}
		/* Pluggable module: report modes by transceiver type */
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			*if_capability |= QED_LM_FIBRE_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
				*if_capability |= QED_LM_10000baseT_Full_BIT;
		}
		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		*if_capability |= QED_LM_FIBRE_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
			if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
			    (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
				*if_capability |= QED_LM_1000baseKX_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
				*if_capability |= QED_LM_10000baseSR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
				*if_capability |= QED_LM_10000baseLR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
				*if_capability |= QED_LM_10000baseLRM_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
				*if_capability |= QED_LM_10000baseR_FEC_BIT;
		}
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
				*if_capability |= QED_LM_25000baseSR_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
				*if_capability |= QED_LM_40000baseLR4_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
				*if_capability |= QED_LM_40000baseSR4_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
				*if_capability |= QED_LM_100000baseSR4_Full_BIT;
		}

		break;
	case MEDIA_KR:
		/* Backplane media: report KR/KX variants only */
		*if_capability |= QED_LM_Backplane_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseKR4_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseKR4_Full_BIT;
		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}
1811
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Populate @if_link (the driver-API view of the link) from the current
 * MCP link parameters, state and capabilities: up/down, speed, port
 * type, pause, advertised/supported/partner link modes and EEE.
 * On failure to fetch link data, @if_link is left zeroed.
 */
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;

	/* Fill link advertised capability*/
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 &if_link->advertised_caps);
	/* Fill link supported capability*/
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 &if_link->supported_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
		if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
1914
1915static void qed_get_current_link(struct qed_dev *cdev,
1916				 struct qed_link_output *if_link)
1917{
1918	struct qed_hwfn *hwfn;
1919	struct qed_ptt *ptt;
1920	int i;
1921
1922	hwfn = &cdev->hwfns[0];
1923	if (IS_PF(cdev)) {
1924		ptt = qed_ptt_acquire(hwfn);
1925		if (ptt) {
1926			qed_fill_link(hwfn, ptt, if_link);
1927			qed_ptt_release(hwfn, ptt);
1928		} else {
1929			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
1930		}
1931	} else {
1932		qed_fill_link(hwfn, NULL, if_link);
1933	}
1934
1935	for_each_hwfn(cdev, i)
1936		qed_inform_vf_link_state(&cdev->hwfns[i]);
1937}
1938
1939void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
1940{
1941	void *cookie = hwfn->cdev->ops_cookie;
1942	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1943	struct qed_link_output if_link;
1944
1945	qed_fill_link(hwfn, ptt, &if_link);
1946	qed_inform_vf_link_state(hwfn);
1947
1948	if (IS_LEAD_HWFN(hwfn) && cookie)
1949		op->link_update(cookie, &if_link);
1950}
1951
 
 
 
 
 
 
 
 
 
1952static int qed_drain(struct qed_dev *cdev)
1953{
1954	struct qed_hwfn *hwfn;
1955	struct qed_ptt *ptt;
1956	int i, rc;
1957
1958	if (IS_VF(cdev))
1959		return 0;
1960
1961	for_each_hwfn(cdev, i) {
1962		hwfn = &cdev->hwfns[i];
1963		ptt = qed_ptt_acquire(hwfn);
1964		if (!ptt) {
1965			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
1966			return -EBUSY;
1967		}
1968		rc = qed_mcp_drain(hwfn, ptt);
1969		qed_ptt_release(hwfn, ptt);
1970		if (rc)
1971			return rc;
1972	}
1973
1974	return 0;
1975}
1976
1977static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
1978					  struct qed_nvm_image_att *nvm_image,
1979					  u32 *crc)
1980{
1981	u8 *buf = NULL;
1982	int rc, j;
1983	u32 val;
1984
1985	/* Allocate a buffer for holding the nvram image */
1986	buf = kzalloc(nvm_image->length, GFP_KERNEL);
1987	if (!buf)
1988		return -ENOMEM;
1989
1990	/* Read image into buffer */
1991	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
1992			      buf, nvm_image->length);
1993	if (rc) {
1994		DP_ERR(cdev, "Failed reading image from nvm\n");
1995		goto out;
1996	}
1997
1998	/* Convert the buffer into big-endian format (excluding the
1999	 * closing 4 bytes of CRC).
2000	 */
2001	for (j = 0; j < nvm_image->length - 4; j += 4) {
2002		val = cpu_to_be32(*(u32 *)&buf[j]);
2003		*(u32 *)&buf[j] = val;
2004	}
2005
2006	/* Calc CRC for the "actual" image buffer, i.e. not including
2007	 * the last 4 CRC bytes.
2008	 */
2009	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));
 
2010
2011out:
2012	kfree(buf);
2013
2014	return rc;
2015}
2016
2017/* Binary file format -
2018 *     /----------------------------------------------------------------------\
2019 * 0B  |                       0x4 [command index]                            |
2020 * 4B  | image_type     | Options        |  Number of register settings       |
2021 * 8B  |                       Value                                          |
2022 * 12B |                       Mask                                           |
2023 * 16B |                       Offset                                         |
2024 *     \----------------------------------------------------------------------/
2025 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2026 * Options - 0'b - Calculate & Update CRC for image
2027 */
/* Handle a single NVM_CHANGE command from a flash batch file.
 *
 * '*data' is a cursor into the firmware image; it is advanced past every
 * field this handler consumes so the caller can parse the next command.
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	/* Skip the command-index dword; the next byte selects the image */
	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	/* Look up the image in the cached NVM attribute table */
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	/* Options byte: bit 0 requests CRC recalculation for the image */
	is_crc = !!(**data & BIT(0));
	(*data)++;
	/* Number of Value-Mask-Offset sets that follow */
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		/* Overwrite the image's trailing 4 CRC bytes */
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		/* Read-modify-write the dword at the given image offset */
		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		/* Only bits under the mask take the new value */
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}
exit:
	return rc;
}
2120
2121/* Binary file format -
2122 *     /----------------------------------------------------------------------\
2123 * 0B  |                       0x3 [command index]                            |
2124 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2125 * 8B  | File-type |                   reserved                               |
2126 * 12B |                    Image length in bytes                             |
2127 *     \----------------------------------------------------------------------/
2128 *     Start a new file of the provided type
2129 */
2130static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2131					  const u8 **data, bool *check_resp)
2132{
2133	u32 file_type, file_size = 0;
2134	int rc;
2135
2136	*data += 4;
2137	*check_resp = !!(**data & BIT(0));
2138	*data += 4;
2139	file_type = **data;
2140
2141	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2142		   "About to start a new file of type %02x\n", file_type);
2143	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2144		*data += 4;
2145		file_size = *((u32 *)(*data));
2146	}
2147
2148	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2149			       (u8 *)(&file_size), 4);
2150	*data += 4;
2151
2152	return rc;
2153}
2154
2155/* Binary file format -
2156 *     /----------------------------------------------------------------------\
2157 * 0B  |                       0x2 [command index]                            |
2158 * 4B  |                       Length in bytes                                |
2159 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2160 * 12B |                       Offset in bytes                                |
2161 * 16B |                       Data ...                                       |
2162 *     \----------------------------------------------------------------------/
2163 *     Write data as part of a file that was previously started. Data should be
2164 *     of length equal to that provided in the message
2165 */
2166static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2167					 const u8 **data, bool *check_resp)
2168{
2169	u32 offset, len;
2170	int rc;
2171
2172	*data += 4;
2173	len = *((u32 *)(*data));
2174	*data += 4;
2175	*check_resp = !!(**data & BIT(0));
2176	*data += 4;
2177	offset = *((u32 *)(*data));
2178	*data += 4;
2179
2180	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2181		   "About to write File-data: %08x bytes to offset %08x\n",
2182		   len, offset);
2183
2184	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2185			       (char *)(*data), len);
2186	*data += len;
2187
2188	return rc;
2189}
2190
2191/* Binary file format [General header] -
2192 *     /----------------------------------------------------------------------\
2193 * 0B  |                       QED_NVM_SIGNATURE                              |
2194 * 4B  |                       Length in bytes                                |
2195 * 8B  | Highest command in this batchfile |          Reserved                |
2196 *     \----------------------------------------------------------------------/
2197 */
2198static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2199					const struct firmware *image,
2200					const u8 **data)
2201{
2202	u32 signature, len;
2203
2204	/* Check minimum size */
2205	if (image->size < 12) {
2206		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2207		return -EINVAL;
2208	}
2209
2210	/* Check signature */
2211	signature = *((u32 *)(*data));
2212	if (signature != QED_NVM_SIGNATURE) {
2213		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2214		return -EINVAL;
2215	}
2216
2217	*data += 4;
2218	/* Validate internal size equals the image-size */
2219	len = *((u32 *)(*data));
2220	if (len != image->size) {
2221		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2222		       len, (u32)image->size);
2223		return -EINVAL;
2224	}
2225
2226	*data += 4;
2227	/* Make sure driver familiar with all commands necessary for this */
2228	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2229		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2230		       *((u16 *)(*data)));
2231		return -EINVAL;
2232	}
2233
2234	*data += 4;
2235
2236	return 0;
2237}
2238
2239/* Binary file format -
2240 *     /----------------------------------------------------------------------\
2241 * 0B  |                       0x5 [command index]                            |
2242 * 4B  | Number of config attributes     |          Reserved                  |
2243 * 4B  | Config ID                       | Entity ID      | Length            |
2244 * 4B  | Value                                                                |
2245 *     |                                                                      |
2246 *     \----------------------------------------------------------------------/
2247 * There can be several cfg_id-entity_id-Length-Value sets as specified by
2248 * 'Number of config attributes'.
2249 *
2250 * The API parses config attributes from the user provided buffer and flashes
2251 * them to the respective NVM path using Management FW inerface.
2252 */
2253static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2254{
2255	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2256	u8 entity_id, len, buf[32];
2257	bool need_nvm_init = true;
2258	struct qed_ptt *ptt;
2259	u16 cfg_id, count;
2260	int rc = 0, i;
2261	u32 flags;
2262
2263	ptt = qed_ptt_acquire(hwfn);
2264	if (!ptt)
2265		return -EAGAIN;
2266
2267	/* NVM CFG ID attribute header */
2268	*data += 4;
2269	count = *((u16 *)*data);
2270	*data += 4;
2271
2272	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2273		   "Read config ids: num_attrs = %0d\n", count);
2274	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2275	 * arithmetic operations in the implementation.
2276	 */
2277	for (i = 1; i <= count; i++) {
2278		cfg_id = *((u16 *)*data);
2279		*data += 2;
2280		entity_id = **data;
2281		(*data)++;
2282		len = **data;
2283		(*data)++;
2284		memcpy(buf, *data, len);
2285		*data += len;
2286
2287		flags = 0;
2288		if (need_nvm_init) {
2289			flags |= QED_NVM_CFG_OPTION_INIT;
2290			need_nvm_init = false;
2291		}
2292
2293		/* Commit to flash and free the resources */
2294		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2295			flags |= QED_NVM_CFG_OPTION_COMMIT |
2296				 QED_NVM_CFG_OPTION_FREE;
2297			need_nvm_init = true;
2298		}
2299
2300		if (entity_id)
2301			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2302
2303		DP_VERBOSE(cdev, NETIF_MSG_DRV,
2304			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
2305			   entity_id, len);
2306		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2307					 buf, len);
2308		if (rc) {
2309			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2310			break;
2311		}
2312	}
2313
2314	qed_ptt_release(hwfn, ptt);
2315
2316	return rc;
2317}
2318
2319#define QED_MAX_NVM_BUF_LEN	32
2320static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2321{
2322	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2323	u8 buf[QED_MAX_NVM_BUF_LEN];
2324	struct qed_ptt *ptt;
2325	u32 len;
2326	int rc;
2327
2328	ptt = qed_ptt_acquire(hwfn);
2329	if (!ptt)
2330		return QED_MAX_NVM_BUF_LEN;
2331
2332	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2333				 &len);
2334	if (rc || !len) {
2335		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2336		len = QED_MAX_NVM_BUF_LEN;
2337	}
2338
2339	qed_ptt_release(hwfn, ptt);
2340
2341	return len;
2342}
2343
2344static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2345				  u32 cmd, u32 entity_id)
2346{
2347	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2348	struct qed_ptt *ptt;
2349	u32 flags, len;
2350	int rc = 0;
2351
2352	ptt = qed_ptt_acquire(hwfn);
2353	if (!ptt)
2354		return -EAGAIN;
2355
2356	DP_VERBOSE(cdev, NETIF_MSG_DRV,
2357		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
2358	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2359	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2360	if (rc)
2361		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2362
2363	qed_ptt_release(hwfn, ptt);
2364
2365	return rc;
2366}
2367
/* Flash a batch file (fetched via request_firmware) to NVM.
 *
 * The file is a general header followed by a sequence of commands; each
 * command handler advances 'data' past the portion it consumed.
 */
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	/* Validate the general header before parsing any commands */
	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			/* Any FW code other than the OK family is a failure */
			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
2453
2454static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2455			     u8 *buf, u16 len)
2456{
2457	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2458
2459	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2460}
2461
2462void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2463{
2464	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2465	void *cookie = p_hwfn->cdev->ops_cookie;
2466
2467	if (ops && ops->schedule_recovery_handler)
2468		ops->schedule_recovery_handler(cookie);
2469}
2470
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2471static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2472			    void *handle)
2473{
2474		return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2475}
2476
2477static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2478{
2479	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2480	struct qed_ptt *ptt;
2481	int status = 0;
2482
2483	ptt = qed_ptt_acquire(hwfn);
2484	if (!ptt)
2485		return -EAGAIN;
2486
2487	status = qed_mcp_set_led(hwfn, ptt, mode);
2488
2489	qed_ptt_release(hwfn, ptt);
2490
2491	return status;
2492}
2493
2494static int qed_recovery_process(struct qed_dev *cdev)
2495{
2496	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2497	struct qed_ptt *p_ptt;
2498	int rc = 0;
2499
2500	p_ptt = qed_ptt_acquire(p_hwfn);
2501	if (!p_ptt)
2502		return -EAGAIN;
2503
2504	rc = qed_start_recovery_process(p_hwfn, p_ptt);
2505
2506	qed_ptt_release(p_hwfn, p_ptt);
2507
2508	return rc;
2509}
2510
2511static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2512{
2513	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2514	struct qed_ptt *ptt;
2515	int rc = 0;
2516
2517	if (IS_VF(cdev))
2518		return 0;
2519
2520	ptt = qed_ptt_acquire(hwfn);
2521	if (!ptt)
2522		return -EAGAIN;
2523
2524	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2525				   : QED_OV_WOL_DISABLED);
2526	if (rc)
2527		goto out;
2528	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2529
2530out:
2531	qed_ptt_release(hwfn, ptt);
2532	return rc;
2533}
2534
2535static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2536{
2537	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2538	struct qed_ptt *ptt;
2539	int status = 0;
2540
2541	if (IS_VF(cdev))
2542		return 0;
2543
2544	ptt = qed_ptt_acquire(hwfn);
2545	if (!ptt)
2546		return -EAGAIN;
2547
2548	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2549						QED_OV_DRIVER_STATE_ACTIVE :
2550						QED_OV_DRIVER_STATE_DISABLED);
2551
2552	qed_ptt_release(hwfn, ptt);
2553
2554	return status;
2555}
2556
2557static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
2558{
2559	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2560	struct qed_ptt *ptt;
2561	int status = 0;
2562
2563	if (IS_VF(cdev))
2564		return 0;
2565
2566	ptt = qed_ptt_acquire(hwfn);
2567	if (!ptt)
2568		return -EAGAIN;
2569
2570	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2571	if (status)
2572		goto out;
2573
2574	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2575
2576out:
2577	qed_ptt_release(hwfn, ptt);
2578	return status;
2579}
2580
2581static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2582{
2583	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2584	struct qed_ptt *ptt;
2585	int status = 0;
2586
2587	if (IS_VF(cdev))
2588		return 0;
2589
2590	ptt = qed_ptt_acquire(hwfn);
2591	if (!ptt)
2592		return -EAGAIN;
2593
2594	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2595	if (status)
2596		goto out;
2597
2598	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2599
2600out:
2601	qed_ptt_release(hwfn, ptt);
2602	return status;
2603}
2604
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2605static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2606				  u8 dev_addr, u32 offset, u32 len)
2607{
2608	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2609	struct qed_ptt *ptt;
2610	int rc = 0;
2611
2612	if (IS_VF(cdev))
2613		return 0;
2614
2615	ptt = qed_ptt_acquire(hwfn);
2616	if (!ptt)
2617		return -EAGAIN;
2618
2619	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2620				  offset, len, buf);
2621
2622	qed_ptt_release(hwfn, ptt);
2623
2624	return rc;
2625}
2626
2627static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
2628{
2629	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2630	struct qed_ptt *ptt;
2631	int rc = 0;
2632
2633	if (IS_VF(cdev))
2634		return 0;
2635
2636	ptt = qed_ptt_acquire(hwfn);
2637	if (!ptt)
2638		return -EAGAIN;
2639
2640	rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
2641
2642	qed_ptt_release(hwfn, ptt);
2643
2644	return rc;
2645}
2646
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Return the index of the hwfn currently selected for affinitized
 * (RDMA/storage) traffic.
 */
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}
2651
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Self-test callbacks exported to protocol drivers through
 * qed_common_ops_pass.selftest.
 */
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
2659
/* The common-ops vtable handed to protocol drivers (qede/qedr/qedf/qedi);
 * it is their only entry point into the core module.
 */
const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};
2704
2705void qed_get_protocol_stats(struct qed_dev *cdev,
2706			    enum qed_mcp_protocol_type type,
2707			    union qed_mcp_protocol_stats *stats)
2708{
2709	struct qed_eth_stats eth_stats;
2710
2711	memset(stats, 0, sizeof(*stats));
2712
2713	switch (type) {
2714	case QED_MCP_LAN_STATS:
2715		qed_get_vport_stats(cdev, &eth_stats);
2716		stats->lan_stats.ucast_rx_pkts =
2717					eth_stats.common.rx_ucast_pkts;
2718		stats->lan_stats.ucast_tx_pkts =
2719					eth_stats.common.tx_ucast_pkts;
2720		stats->lan_stats.fcs_err = -1;
2721		break;
2722	case QED_MCP_FCOE_STATS:
2723		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
2724		break;
2725	case QED_MCP_ISCSI_STATS:
2726		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
2727		break;
2728	default:
2729		DP_VERBOSE(cdev, QED_MSG_SP,
2730			   "Invalid protocol type = %d\n", type);
2731		return;
2732	}
2733}
2734
/* Schedule the slowpath task to service an MFW TLV request.
 *
 * Only sets the request flag and queues the work; the TLV data is
 * actually collected in the slowpath workqueue context.  The barriers
 * order the flag update against the surrounding non-atomic accesses,
 * per kernel atomic-bitop ordering rules.
 */
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);

	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);

	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
2747
2748static void
2749qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
2750{
2751	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
2752	struct qed_eth_stats_common *p_common;
2753	struct qed_generic_tlvs gen_tlvs;
2754	struct qed_eth_stats stats;
2755	int i;
2756
2757	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
2758	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
2759
2760	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
2761		tlv->flags.ipv4_csum_offload = true;
2762	if (gen_tlvs.feat_flags & QED_TLV_LSO)
2763		tlv->flags.lso_supported = true;
2764	tlv->flags.b_set = true;
2765
2766	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
2767		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
2768			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
2769			tlv->mac_set[i] = true;
2770		}
2771	}
2772
2773	qed_get_vport_stats(cdev, &stats);
2774	p_common = &stats.common;
2775	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
2776			 p_common->rx_bcast_pkts;
2777	tlv->rx_frames_set = true;
2778	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
2779			p_common->rx_bcast_bytes;
2780	tlv->rx_bytes_set = true;
2781	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
2782			 p_common->tx_bcast_pkts;
2783	tlv->tx_frames_set = true;
2784	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
2785			p_common->tx_bcast_bytes;
2786	tlv->rx_bytes_set = true;
2787}
2788
2789int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
2790			  union qed_mfw_tlv_data *tlv_buf)
2791{
2792	struct qed_dev *cdev = hwfn->cdev;
2793	struct qed_common_cb_ops *ops;
2794
2795	ops = cdev->protocol_ops.common;
2796	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
2797		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
2798		return -EINVAL;
2799	}
2800
2801	switch (type) {
2802	case QED_MFW_TLV_GENERIC:
2803		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
2804		break;
2805	case QED_MFW_TLV_ETH:
2806		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
2807		break;
2808	case QED_MFW_TLV_FCOE:
2809		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
2810		break;
2811	case QED_MFW_TLV_ISCSI:
2812		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
2813		break;
2814	default:
2815		break;
2816	}
2817
2818	return 0;
 
 
 
 
 
2819}