v6.8: drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2018 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
  11#include <linux/ethtool.h>
  12#include <linux/module.h>
  13#include <linux/pci.h>
  14#include <linux/netdevice.h>
  15#include <linux/if_vlan.h>
  16#include <linux/interrupt.h>
  17#include <linux/etherdevice.h>
  18#include "bnxt_hsi.h"
  19#include "bnxt.h"
  20#include "bnxt_hwrm.h"
  21#include "bnxt_ulp.h"
  22#include "bnxt_sriov.h"
  23#include "bnxt_vfr.h"
  24#include "bnxt_ethtool.h"
  25
  26#ifdef CONFIG_BNXT_SRIOV
  27static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
  28					  struct bnxt_vf_info *vf, u16 event_id)
  29{
  30	struct hwrm_fwd_async_event_cmpl_input *req;
  31	struct hwrm_async_event_cmpl *async_cmpl;
  32	int rc = 0;
  33
  34	rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
  35	if (rc)
  36		goto exit;
  37
  38	if (vf)
  39		req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
  40	else
  41		/* broadcast this async event to all VFs */
  42		req->encap_async_event_target_id = cpu_to_le16(0xffff);
  43	async_cmpl =
  44		(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
  45	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
  46	async_cmpl->event_id = cpu_to_le16(event_id);
  47
  48	rc = hwrm_req_send(bp, req);
  49exit:
  50	if (rc)
  51		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
  52			   rc);
  53	return rc;
  54}
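
The NULL-vf branch above is exercised later in this file: bnxt_sriov_disable() broadcasts a PF-driver-unload notification to every VF by passing a NULL vf, while bnxt_set_vf_link_state() targets a single VF. Both call shapes, condensed from the code below:

	/* notify one VF that its link state changed */
	rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);

	/* broadcast to all VFs (target id 0xffff) before the PF unloads */
	bnxt_hwrm_fwd_async_event_cmpl(bp, NULL,
			ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);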
  55
  56static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
  57{
  58	if (!bp->pf.active_vfs) {
   59		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
  60		return -EINVAL;
  61	}
  62	if (vf_id >= bp->pf.active_vfs) {
  63		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
  64		return -EINVAL;
  65	}
  66	return 0;
  67}
  68
  69int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
  70{
  71	struct bnxt *bp = netdev_priv(dev);
  72	struct hwrm_func_cfg_input *req;
  73	bool old_setting = false;
  74	struct bnxt_vf_info *vf;
  75	u32 func_flags;
  76	int rc;
  77
  78	if (bp->hwrm_spec_code < 0x10701)
  79		return -ENOTSUPP;
  80
  81	rc = bnxt_vf_ndo_prep(bp, vf_id);
  82	if (rc)
  83		return rc;
  84
  85	vf = &bp->pf.vf[vf_id];
  86	if (vf->flags & BNXT_VF_SPOOFCHK)
  87		old_setting = true;
  88	if (old_setting == setting)
  89		return 0;
  90
  91	if (setting)
  92		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
  93	else
  94		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
   95	/* TODO: if the driver supports VLAN filtering on the guest VLAN,
   96	 * the spoof check should also include VLAN anti-spoofing.
   97	 */
  98	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
  99	if (!rc) {
 100		req->fid = cpu_to_le16(vf->fw_fid);
 101		req->flags = cpu_to_le32(func_flags);
 102		rc = hwrm_req_send(bp, req);
 103		if (!rc) {
 104			if (setting)
 105				vf->flags |= BNXT_VF_SPOOFCHK;
 106			else
 107				vf->flags &= ~BNXT_VF_SPOOFCHK;
 108		}
 109	}
 110	return rc;
 111}
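
Handlers such as bnxt_set_vf_spoofchk() are not called directly; the networking core dispatches them through the driver's net_device_ops when userspace issues an RTM_SETLINK request (e.g. "ip link set <pf-ifname> vf 0 spoofchk on"). A sketch of the wiring, abridged from the net_device_ops initializer in bnxt.c (only the SR-IOV entries shown):

	static const struct net_device_ops bnxt_netdev_ops = {
		/* ... */
	#ifdef CONFIG_BNXT_SRIOV
		.ndo_get_vf_config	= bnxt_get_vf_config,
		.ndo_set_vf_mac		= bnxt_set_vf_mac,
		.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
		.ndo_set_vf_rate	= bnxt_set_vf_bw,
		.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
		.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
		.ndo_set_vf_trust	= bnxt_set_vf_trust,
	#endif
		/* ... */
	};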
 112
 113static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
 114{
 115	struct hwrm_func_qcfg_output *resp;
 116	struct hwrm_func_qcfg_input *req;
 117	int rc;
 118
 119	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
 120	if (rc)
 121		return rc;
 122
 123	req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
 124	resp = hwrm_req_hold(bp, req);
 125	rc = hwrm_req_send(bp, req);
 126	if (!rc)
 127		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
 128	hwrm_req_drop(bp, req);
 129	return rc;
 130}
 131
 132bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 133{
 134	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 135		return !!(vf->flags & BNXT_VF_TRUST);
 136
 137	bnxt_hwrm_func_qcfg_flags(bp, vf);
 138	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
 139}
 140
 141static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 142{
 143	struct hwrm_func_cfg_input *req;
 144	int rc;
 145
 146	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 147		return 0;
 148
 149	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 150	if (rc)
 151		return rc;
 152
 153	req->fid = cpu_to_le16(vf->fw_fid);
 154	if (vf->flags & BNXT_VF_TRUST)
 155		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 156	else
 157		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
 158	return hwrm_req_send(bp, req);
 159}
 160
 161int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
 162{
 163	struct bnxt *bp = netdev_priv(dev);
 164	struct bnxt_vf_info *vf;
 165
 166	if (bnxt_vf_ndo_prep(bp, vf_id))
 167		return -EINVAL;
 168
 169	vf = &bp->pf.vf[vf_id];
 170	if (trusted)
 171		vf->flags |= BNXT_VF_TRUST;
 172	else
 173		vf->flags &= ~BNXT_VF_TRUST;
 174
 175	bnxt_hwrm_set_trusted_vf(bp, vf);
 176	return 0;
 177}
 178
 179int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 180		       struct ifla_vf_info *ivi)
 181{
 182	struct bnxt *bp = netdev_priv(dev);
 183	struct bnxt_vf_info *vf;
 184	int rc;
 185
 186	rc = bnxt_vf_ndo_prep(bp, vf_id);
 187	if (rc)
 188		return rc;
 189
 190	ivi->vf = vf_id;
 191	vf = &bp->pf.vf[vf_id];
 192
 193	if (is_valid_ether_addr(vf->mac_addr))
 194		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
 195	else
 196		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
 197	ivi->max_tx_rate = vf->max_tx_rate;
 198	ivi->min_tx_rate = vf->min_tx_rate;
 199	ivi->vlan = vf->vlan;
 200	if (vf->flags & BNXT_VF_QOS)
 201		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
 202	else
 203		ivi->qos = 0;
 204	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
 205	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
 206	if (!(vf->flags & BNXT_VF_LINK_FORCED))
 207		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 208	else if (vf->flags & BNXT_VF_LINK_UP)
 209		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
 210	else
 211		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
 212
 213	return 0;
 214}
 215
 216int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 217{
 218	struct bnxt *bp = netdev_priv(dev);
 219	struct hwrm_func_cfg_input *req;
 220	struct bnxt_vf_info *vf;
 221	int rc;
 222
 223	rc = bnxt_vf_ndo_prep(bp, vf_id);
 224	if (rc)
 225		return rc;
  226	/* Reject broadcast or multicast MAC addresses; a zero MAC address
  227	 * means the VF is allowed to use its own MAC address.
  228	 */
 229	if (is_multicast_ether_addr(mac)) {
 230		netdev_err(dev, "Invalid VF ethernet address\n");
 231		return -EINVAL;
 232	}
 233	vf = &bp->pf.vf[vf_id];
 234
 235	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 236	if (rc)
 237		return rc;
 238
 239	memcpy(vf->mac_addr, mac, ETH_ALEN);
 240
 241	req->fid = cpu_to_le16(vf->fw_fid);
 242	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 243	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
 244	return hwrm_req_send(bp, req);
 245}
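
A note on the single is_multicast_ether_addr() test above: it covers both the broadcast and the multicast case named in the comment, because broadcast (ff:ff:ff:ff:ff:ff) is just the all-ones multicast address and the test only inspects the I/G bit. Conceptually (a sketch of the semantics with a hypothetical helper name, not the <linux/etherdevice.h> source):

	/* An Ethernet address is multicast when the least-significant bit
	 * of the first octet (the I/G bit) is set; broadcast is a special
	 * case of multicast, so one check rejects both.
	 */
	static bool mac_is_bc_or_mc(const u8 *addr)
	{
		return addr[0] & 1;
	}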
 246
 247int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 248		     __be16 vlan_proto)
 249{
 250	struct bnxt *bp = netdev_priv(dev);
 251	struct hwrm_func_cfg_input *req;
 252	struct bnxt_vf_info *vf;
 253	u16 vlan_tag;
 254	int rc;
 255
 256	if (bp->hwrm_spec_code < 0x10201)
 257		return -ENOTSUPP;
 258
 259	if (vlan_proto != htons(ETH_P_8021Q))
 260		return -EPROTONOSUPPORT;
 261
 262	rc = bnxt_vf_ndo_prep(bp, vf_id);
 263	if (rc)
 264		return rc;
 265
  266	/* TODO: proper handling of user priority still needs to be implemented;
  267	 * for now, fail the command if a non-zero priority is given.
  268	 */
 269	if (vlan_id > 4095 || qos)
 270		return -EINVAL;
 271
 272	vf = &bp->pf.vf[vf_id];
 273	vlan_tag = vlan_id;
 274	if (vlan_tag == vf->vlan)
 275		return 0;
 276
 277	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 278	if (!rc) {
 279		req->fid = cpu_to_le16(vf->fw_fid);
 280		req->dflt_vlan = cpu_to_le16(vlan_tag);
 281		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 282		rc = hwrm_req_send(bp, req);
 283		if (!rc)
 284			vf->vlan = vlan_tag;
 285	}
 286	return rc;
 287}
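
For context on the rejected qos argument: in an 802.1Q tag, the 12-bit VLAN ID and the 3-bit priority (PCP) share one 16-bit TCI, with the priority field starting at VLAN_PRIO_SHIFT. This is also why bnxt_get_vf_config() recovers ivi->qos as vf->vlan >> VLAN_PRIO_SHIFT. If the TODO were implemented, the tag would be composed roughly like this (hypothetical helper, not driver code):

	#include <linux/if_vlan.h>

	static u16 make_vlan_tci(u16 vlan_id, u8 qos)
	{
		/* VLAN ID in bits 0-11, priority (PCP) in bits 13-15 */
		return (vlan_id & VLAN_VID_MASK) | ((u16)qos << VLAN_PRIO_SHIFT);
	}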
 288
 289int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 290		   int max_tx_rate)
 291{
 292	struct bnxt *bp = netdev_priv(dev);
 293	struct hwrm_func_cfg_input *req;
 294	struct bnxt_vf_info *vf;
 295	u32 pf_link_speed;
 296	int rc;
 297
 298	rc = bnxt_vf_ndo_prep(bp, vf_id);
 299	if (rc)
 300		return rc;
 301
 302	vf = &bp->pf.vf[vf_id];
 303	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
 304	if (max_tx_rate > pf_link_speed) {
  305		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
 306			    max_tx_rate, vf_id);
 307		return -EINVAL;
 308	}
 309
 310	if (min_tx_rate > pf_link_speed) {
 311		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
 312			    min_tx_rate, vf_id);
 313		return -EINVAL;
 314	}
 315	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
 316		return 0;
 317	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 318	if (!rc) {
 319		req->fid = cpu_to_le16(vf->fw_fid);
 320		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
 321					   FUNC_CFG_REQ_ENABLES_MIN_BW);
 322		req->max_bw = cpu_to_le32(max_tx_rate);
 323		req->min_bw = cpu_to_le32(min_tx_rate);
 324		rc = hwrm_req_send(bp, req);
 325		if (!rc) {
 326			vf->min_tx_rate = min_tx_rate;
 327			vf->max_tx_rate = max_tx_rate;
 328		}
 329	}
 330	return rc;
 331}
 332
 333int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
 334{
 335	struct bnxt *bp = netdev_priv(dev);
 336	struct bnxt_vf_info *vf;
 337	int rc;
 338
 339	rc = bnxt_vf_ndo_prep(bp, vf_id);
 340	if (rc)
 341		return rc;
 342
 343	vf = &bp->pf.vf[vf_id];
 344
 345	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
 346	switch (link) {
 347	case IFLA_VF_LINK_STATE_AUTO:
 348		vf->flags |= BNXT_VF_LINK_UP;
 349		break;
 350	case IFLA_VF_LINK_STATE_DISABLE:
 351		vf->flags |= BNXT_VF_LINK_FORCED;
 352		break;
 353	case IFLA_VF_LINK_STATE_ENABLE:
 354		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
 355		break;
 356	default:
 357		netdev_err(bp->dev, "Invalid link option\n");
 358		rc = -EINVAL;
 359		break;
 360	}
 361	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
 362		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
 363			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
 364	return rc;
 365}
 366
 367static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
 368{
 369	int i;
 370	struct bnxt_vf_info *vf;
 371
 372	for (i = 0; i < num_vfs; i++) {
 373		vf = &bp->pf.vf[i];
 374		memset(vf, 0, sizeof(*vf));
 375	}
 376	return 0;
 377}
 378
 379static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
 380{
 381	struct hwrm_func_vf_resc_free_input *req;
 382	struct bnxt_pf_info *pf = &bp->pf;
 383	int i, rc;
 384
 385	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
 386	if (rc)
 387		return rc;
 388
 389	hwrm_req_hold(bp, req);
 390	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
 391		req->vf_id = cpu_to_le16(i);
 392		rc = hwrm_req_send(bp, req);
 393		if (rc)
 394			break;
 395	}
 396	hwrm_req_drop(bp, req);
 397	return rc;
 398}
 399
 400static void bnxt_free_vf_resources(struct bnxt *bp)
 401{
 402	struct pci_dev *pdev = bp->pdev;
 403	int i;
 404
 405	kfree(bp->pf.vf_event_bmap);
 406	bp->pf.vf_event_bmap = NULL;
 407
 408	for (i = 0; i < 4; i++) {
 409		if (bp->pf.hwrm_cmd_req_addr[i]) {
 410			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
 411					  bp->pf.hwrm_cmd_req_addr[i],
 412					  bp->pf.hwrm_cmd_req_dma_addr[i]);
 413			bp->pf.hwrm_cmd_req_addr[i] = NULL;
 414		}
 415	}
 416
 417	bp->pf.active_vfs = 0;
 418	kfree(bp->pf.vf);
 419	bp->pf.vf = NULL;
 420}
 421
 422static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
 423{
 424	struct pci_dev *pdev = bp->pdev;
 425	u32 nr_pages, size, i, j, k = 0;
 426
 427	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
 428	if (!bp->pf.vf)
 429		return -ENOMEM;
 430
 431	bnxt_set_vf_attr(bp, num_vfs);
 432
 433	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
 434	nr_pages = size / BNXT_PAGE_SIZE;
 435	if (size & (BNXT_PAGE_SIZE - 1))
 436		nr_pages++;
 437
 438	for (i = 0; i < nr_pages; i++) {
 439		bp->pf.hwrm_cmd_req_addr[i] =
 440			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
 441					   &bp->pf.hwrm_cmd_req_dma_addr[i],
 442					   GFP_KERNEL);
 443
 444		if (!bp->pf.hwrm_cmd_req_addr[i])
 445			return -ENOMEM;
 446
 447		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
 448			struct bnxt_vf_info *vf = &bp->pf.vf[k];
 449
 450			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
 451						j * BNXT_HWRM_REQ_MAX_SIZE;
 452			vf->hwrm_cmd_req_dma_addr =
 453				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
 454				BNXT_HWRM_REQ_MAX_SIZE;
 455			k++;
 456		}
 457	}
 458
  459	/* Max 128 VFs: the 16-byte bitmap holds 16 * 8 = 128 bits */
 460	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
 461	if (!bp->pf.vf_event_bmap)
 462		return -ENOMEM;
 463
 464	bp->pf.hwrm_cmd_req_pages = nr_pages;
 465	return 0;
 466}
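
Two small arithmetic points in the allocator above: the nr_pages computation is the open-coded form of the kernel's DIV_ROUND_UP() macro (the mask test already assumes BNXT_PAGE_SIZE is a power of two), and the 16-byte vf_event_bmap provides 16 * 8 = 128 bits, which is what the "Max 128 VFs" comment refers to. The rounding could equivalently be written as:

	nr_pages = DIV_ROUND_UP(size, BNXT_PAGE_SIZE);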
 467
 468static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 469{
 470	struct hwrm_func_buf_rgtr_input *req;
 471	int rc;
 472
 473	rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
 474	if (rc)
 475		return rc;
 476
 477	req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
 478	req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
 479	req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
 480	req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
 481	req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
 482	req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
 483	req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
 484
 485	return hwrm_req_send(bp, req);
 486}
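
This request hands firmware the DMA addresses of the pages allocated in bnxt_alloc_vf_resources(), so that HWRM commands originating from VFs can be deposited there for the PF to inspect; the four fixed req_buf_page_addrN fields mirror the four-entry hwrm_cmd_req_addr[] array freed in bnxt_free_vf_resources(). As an illustrative capacity check (assuming a 4096-byte BNXT_PAGE_SIZE and a 128-byte BNXT_HWRM_REQ_MAX_SIZE), each page holds 4096 / 128 = 32 request slots, so 4 pages cover 128 VFs, consistent with the 128-bit event bitmap above.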
 487
 488static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
 489{
 490	struct hwrm_func_cfg_input *req;
 491	struct bnxt_vf_info *vf;
 492	int rc;
 493
 494	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 495	if (rc)
 496		return rc;
 497
 498	vf = &bp->pf.vf[vf_id];
 499	req->fid = cpu_to_le16(vf->fw_fid);
 500
 501	if (is_valid_ether_addr(vf->mac_addr)) {
 502		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 503		memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
 504	}
 505	if (vf->vlan) {
 506		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 507		req->dflt_vlan = cpu_to_le16(vf->vlan);
 508	}
 509	if (vf->max_tx_rate) {
 510		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
 511					    FUNC_CFG_REQ_ENABLES_MIN_BW);
 512		req->max_bw = cpu_to_le32(vf->max_tx_rate);
 513		req->min_bw = cpu_to_le32(vf->min_tx_rate);
 514	}
 515	if (vf->flags & BNXT_VF_TRUST)
 516		req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 517
 518	return hwrm_req_send(bp, req);
 519}
 520
 521/* Only called by PF to reserve resources for VFs, returns actual number of
 522 * VFs configured, or < 0 on error.
 523 */
 524static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 525{
 526	struct hwrm_func_vf_resource_cfg_input *req;
 527	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 528	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
 529	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
 530	struct bnxt_pf_info *pf = &bp->pf;
 531	int i, rc = 0, min = 1;
 532	u16 vf_msix = 0;
 533	u16 vf_rss;
 534
 535	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
 536	if (rc)
 537		return rc;
 538
 539	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 540		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
 541		vf_ring_grps = 0;
 542	} else {
 543		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
 544	}
 545	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
 546	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
 547	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 548		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
 549	else
 550		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
 551	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
 552	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
 553	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
 554
 555	req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
 556	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 557		min = 0;
 558		req->min_rsscos_ctx = cpu_to_le16(min);
 559	}
 560	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
 561	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 562		req->min_cmpl_rings = cpu_to_le16(min);
 563		req->min_tx_rings = cpu_to_le16(min);
 564		req->min_rx_rings = cpu_to_le16(min);
 565		req->min_l2_ctxs = cpu_to_le16(min);
 566		req->min_vnics = cpu_to_le16(min);
 567		req->min_stat_ctx = cpu_to_le16(min);
 568		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
 569			req->min_hw_ring_grps = cpu_to_le16(min);
 570	} else {
 571		vf_cp_rings /= num_vfs;
 572		vf_tx_rings /= num_vfs;
 573		vf_rx_rings /= num_vfs;
 574		if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
 575		    vf_vnics >= pf->max_vfs) {
 576			/* Take into account that FW has pre-reserved 1 VNIC for
 577			 * each pf->max_vfs.
 578			 */
 579			vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
 580		} else {
 581			vf_vnics /= num_vfs;
 582		}
 583		vf_stat_ctx /= num_vfs;
 584		vf_ring_grps /= num_vfs;
 585		vf_rss /= num_vfs;
 586
 587		vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 588		req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
 589		req->min_tx_rings = cpu_to_le16(vf_tx_rings);
 590		req->min_rx_rings = cpu_to_le16(vf_rx_rings);
 591		req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
 592		req->min_vnics = cpu_to_le16(vf_vnics);
 593		req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
 594		req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 595		req->min_rsscos_ctx = cpu_to_le16(vf_rss);
 596	}
 597	req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
 598	req->max_tx_rings = cpu_to_le16(vf_tx_rings);
 599	req->max_rx_rings = cpu_to_le16(vf_rx_rings);
 600	req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
 601	req->max_vnics = cpu_to_le16(vf_vnics);
 602	req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
 603	req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 604	req->max_rsscos_ctx = cpu_to_le16(vf_rss);
 605	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 606		req->max_msix = cpu_to_le16(vf_msix / num_vfs);
 607
 608	hwrm_req_hold(bp, req);
 609	for (i = 0; i < num_vfs; i++) {
 610		if (reset)
 611			__bnxt_set_vf_params(bp, i);
 612
 613		req->vf_id = cpu_to_le16(pf->first_vf_id + i);
 614		rc = hwrm_req_send(bp, req);
 615		if (rc)
 616			break;
 617		pf->active_vfs = i + 1;
 618		pf->vf[i].fw_fid = pf->first_vf_id + i;
 619	}
 620
 621	if (pf->active_vfs) {
 622		u16 n = pf->active_vfs;
 623
 624		hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
 625		hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
 626		hw_resc->max_hw_ring_grps -=
 627			le16_to_cpu(req->min_hw_ring_grps) * n;
 628		hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
 629		hw_resc->max_rsscos_ctxs -=
 630			le16_to_cpu(req->min_rsscos_ctx) * n;
 631		hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
 632		hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
 633		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 634			hw_resc->max_nqs -= vf_msix;
 635
 636		rc = pf->active_vfs;
 637	}
 638	hwrm_req_drop(bp, req);
 639	return rc;
 640}
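
A worked example of the pre-reserved VNIC adjustment in the function above (illustrative numbers only): with vf_vnics = 64 available VNICs, pf->max_vfs = 32 and num_vfs = 8, firmware has already set aside one VNIC for each of the 32 possible VFs, so only 64 - 32 = 32 VNICs are genuinely free; adding back the 8 reservations belonging to the VFs actually being enabled gives (64 - 32 + 8) / 8 = 5 VNICs per VF, rather than the naive 64 / 8 = 8, which would over-commit the pool.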
 641
 642/* Only called by PF to reserve resources for VFs, returns actual number of
 643 * VFs configured, or < 0 on error.
 644 */
 645static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 646{
 647	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
 648	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 649	struct bnxt_pf_info *pf = &bp->pf;
 650	struct hwrm_func_cfg_input *req;
 651	int total_vf_tx_rings = 0;
 652	u16 vf_ring_grps;
 653	u32 mtu, i;
 654	int rc;
 655
 656	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 657	if (rc)
 658		return rc;
 659
  660	/* Remaining rings are distributed equally amongst VFs for now */
 661	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
 662	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
 663	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 664		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
 665			      num_vfs;
 666	else
 667		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
 668			      num_vfs;
 669	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
 670	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
 671	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 672	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 673
 674	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
 675				   FUNC_CFG_REQ_ENABLES_MRU |
 676				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
 677				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
 678				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
 679				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
 680				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
 681				   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
 682				   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
 683				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
 684
 685	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
 686	req->mru = cpu_to_le16(mtu);
 687	req->admin_mtu = cpu_to_le16(mtu);
 688
 689	req->num_rsscos_ctxs = cpu_to_le16(1);
 690	req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
 691	req->num_tx_rings = cpu_to_le16(vf_tx_rings);
 692	req->num_rx_rings = cpu_to_le16(vf_rx_rings);
 693	req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 694	req->num_l2_ctxs = cpu_to_le16(4);
 695
 696	req->num_vnics = cpu_to_le16(vf_vnics);
 697	/* FIXME spec currently uses 1 bit for stats ctx */
 698	req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
 699
 700	hwrm_req_hold(bp, req);
 701	for (i = 0; i < num_vfs; i++) {
 702		int vf_tx_rsvd = vf_tx_rings;
 703
 704		req->fid = cpu_to_le16(pf->first_vf_id + i);
 705		rc = hwrm_req_send(bp, req);
 706		if (rc)
 707			break;
 708		pf->active_vfs = i + 1;
 709		pf->vf[i].fw_fid = le16_to_cpu(req->fid);
 710		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
 711					      &vf_tx_rsvd);
 712		if (rc)
 713			break;
 714		total_vf_tx_rings += vf_tx_rsvd;
 715	}
 716	hwrm_req_drop(bp, req);
 717	if (pf->active_vfs) {
 718		hw_resc->max_tx_rings -= total_vf_tx_rings;
 719		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
 720		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
 721		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
 722		hw_resc->max_rsscos_ctxs -= num_vfs;
 723		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
 724		hw_resc->max_vnics -= vf_vnics * num_vfs;
 725		rc = pf->active_vfs;
 726	}
 727	return rc;
 728}
 729
 730static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
 731{
 732	if (BNXT_NEW_RM(bp))
 733		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
 734	else
 735		return bnxt_hwrm_func_cfg(bp, num_vfs);
 736}
 737
 738int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
 739{
 740	int rc;
 741
 742	/* Register buffers for VFs */
 743	rc = bnxt_hwrm_func_buf_rgtr(bp);
 744	if (rc)
 745		return rc;
 746
 747	/* Reserve resources for VFs */
 748	rc = bnxt_func_cfg(bp, *num_vfs, reset);
 749	if (rc != *num_vfs) {
 750		if (rc <= 0) {
 751			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
 752			*num_vfs = 0;
 753			return rc;
 754		}
 755		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
 756			    rc);
 757		*num_vfs = rc;
 758	}
 759
 760	return 0;
 761}
 762
 763static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 764{
 765	int rc = 0, vfs_supported;
 766	int min_rx_rings, min_tx_rings, min_rss_ctxs;
 767	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 768	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
 769	int avail_cp, avail_stat;
 770
  771	/* Check if we can enable the requested number of VFs. At a minimum
  772	 * we require 1 RX and 1 TX ring for each VF. In this minimum config,
  773	 * features like TPA will not be available.
 774	 */
 775	vfs_supported = *num_vfs;
 776
 777	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
 778	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
 779	avail_cp = min_t(int, avail_cp, avail_stat);
 780
 781	while (vfs_supported) {
 782		min_rx_rings = vfs_supported;
 783		min_tx_rings = vfs_supported;
 784		min_rss_ctxs = vfs_supported;
 785
 786		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
 787			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
 788			    min_rx_rings)
 789				rx_ok = 1;
 790		} else {
 791			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
 792			    min_rx_rings)
 793				rx_ok = 1;
 794		}
 795		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
 796		    avail_cp < min_rx_rings)
 797			rx_ok = 0;
 798
 799		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
 800		    avail_cp >= min_tx_rings)
 801			tx_ok = 1;
 802
 803		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
 804		    min_rss_ctxs)
 805			rss_ok = 1;
 806
 807		if (tx_ok && rx_ok && rss_ok)
 808			break;
 809
 810		vfs_supported--;
 811	}
 812
 813	if (!vfs_supported) {
  814		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
 815		return -EINVAL;
 816	}
 817
 818	if (vfs_supported != *num_vfs) {
 819		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
 820			    *num_vfs, vfs_supported);
 821		*num_vfs = vfs_supported;
 822	}
 823
 824	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
 825	if (rc)
 826		goto err_out1;
 827
 828	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
 829	if (rc)
 830		goto err_out2;
 831
 832	rc = pci_enable_sriov(bp->pdev, *num_vfs);
 833	if (rc)
 834		goto err_out2;
 835
 836	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
 837		return 0;
 838
 839	/* Create representors for VFs in switchdev mode */
 840	devl_lock(bp->dl);
 841	rc = bnxt_vf_reps_create(bp);
 842	devl_unlock(bp->dl);
 843	if (rc) {
  844		netdev_info(bp->dev, "Cannot enable VFs as representors cannot be created\n");
 845		goto err_out3;
 846	}
 847
 848	return 0;
 849
 850err_out3:
 851	/* Disable SR-IOV */
 852	pci_disable_sriov(bp->pdev);
 853
 854err_out2:
  855	/* Free the resources reserved for various VFs */
 856	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
 857
 858	/* Restore the max resources */
 859	bnxt_hwrm_func_qcaps(bp);
 860
 861err_out1:
 862	bnxt_free_vf_resources(bp);
 863
 864	return rc;
 865}
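
To make the ring-budget backoff loop in bnxt_sriov_enable() concrete (illustrative numbers): with max_rx_rings = 32, rx_nr_rings = 8 and BNXT_FLAG_AGG_RINGS set, the PF consumes 2 * 8 = 16 RX rings, leaving 16 for VFs; a request for 20 VFs fails the rx_ok test, so vfs_supported counts down until it reaches 16, at which point one RX ring per VF fits (and, by the same logic, the TX-ring, VNIC, completion-ring and RSS-context tests must also pass) and the loop exits.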
 866
 867void bnxt_sriov_disable(struct bnxt *bp)
 868{
 869	u16 num_vfs = pci_num_vf(bp->pdev);
 870
 871	if (!num_vfs)
 872		return;
 873
 874	/* synchronize VF and VF-rep create and destroy */
 875	devl_lock(bp->dl);
 876	bnxt_vf_reps_destroy(bp);
 877
 878	if (pci_vfs_assigned(bp->pdev)) {
 879		bnxt_hwrm_fwd_async_event_cmpl(
 880			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
 881		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
 882			    num_vfs);
 883	} else {
 884		pci_disable_sriov(bp->pdev);
  885		/* Free the HW resources reserved for various VFs */
 886		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
 887	}
 888	devl_unlock(bp->dl);
 889
 890	bnxt_free_vf_resources(bp);
 891
 892	/* Reclaim all resources for the PF. */
 893	rtnl_lock();
 894	bnxt_restore_pf_fw_resources(bp);
 895	rtnl_unlock();
 896}
 897
 898int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
 899{
 900	struct net_device *dev = pci_get_drvdata(pdev);
 901	struct bnxt *bp = netdev_priv(dev);
 902
 903	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
  904		netdev_warn(dev, "SRIOV is not allowed when the IRQ mode is not MSI-X\n");
 905		return 0;
 906	}
 907
 908	rtnl_lock();
 909	if (!netif_running(dev)) {
  910		netdev_warn(dev, "Reject SRIOV config request since the interface is down\n");
 911		rtnl_unlock();
 912		return 0;
 913	}
 914	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
 915		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
 916		rtnl_unlock();
 917		return 0;
 918	}
 919	bp->sriov_cfg = true;
 920	rtnl_unlock();
 921
 922	if (pci_vfs_assigned(bp->pdev)) {
 923		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
 924		num_vfs = 0;
 925		goto sriov_cfg_exit;
 926	}
 927
  928	/* Check if the number of enabled VFs is the same as requested */
 929	if (num_vfs && num_vfs == bp->pf.active_vfs)
 930		goto sriov_cfg_exit;
 931
  932	/* if there are pre-existing VFs, clean them up */
 933	bnxt_sriov_disable(bp);
 934	if (!num_vfs)
 935		goto sriov_cfg_exit;
 936
 937	bnxt_sriov_enable(bp, &num_vfs);
 938
 939sriov_cfg_exit:
 940	bp->sriov_cfg = false;
 941	wake_up(&bp->sriov_cfg_wait);
 942
 943	return num_vfs;
 944}
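
bnxt_sriov_configure() is the driver's PCI sriov_configure callback: the PCI core invokes it when userspace writes a VF count to the device's sriov_numvfs sysfs attribute (writing 0 tears the VFs down again). A sketch of the hookup, abridged from the pci_driver definition in bnxt.c:

	static struct pci_driver bnxt_pci_driver = {
		.name		= "bnxt_en",
		/* ... .id_table, .probe, .remove, ... */
	#if defined(CONFIG_BNXT_SRIOV)
		.sriov_configure = bnxt_sriov_configure,
	#endif
	};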
 945
 946static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 947			      void *encap_resp, __le64 encap_resp_addr,
 948			      __le16 encap_resp_cpr, u32 msg_size)
 949{
 950	struct hwrm_fwd_resp_input *req;
 951	int rc;
 952
 953	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
 954		return -EINVAL;
 955
 956	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
 957	if (!rc) {
 958		/* Set the new target id */
 959		req->target_id = cpu_to_le16(vf->fw_fid);
 960		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 961		req->encap_resp_len = cpu_to_le16(msg_size);
 962		req->encap_resp_addr = encap_resp_addr;
 963		req->encap_resp_cmpl_ring = encap_resp_cpr;
 964		memcpy(req->encap_resp, encap_resp, msg_size);
 965
 966		rc = hwrm_req_send(bp, req);
 967	}
 968	if (rc)
 969		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
 970	return rc;
 971}
 972
 973static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 974				  u32 msg_size)
 975{
 976	struct hwrm_reject_fwd_resp_input *req;
 977	int rc;
 978
 979	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
 980		return -EINVAL;
 981
 982	rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
 983	if (!rc) {
 984		/* Set the new target id */
 985		req->target_id = cpu_to_le16(vf->fw_fid);
 986		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 987		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
 988
 989		rc = hwrm_req_send(bp, req);
 990	}
 991	if (rc)
 992		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
 993	return rc;
 994}
 995
 996static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 997				   u32 msg_size)
 998{
 999	struct hwrm_exec_fwd_resp_input *req;
1000	int rc;
1001
1002	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
1003		return -EINVAL;
1004
1005	rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
1006	if (!rc) {
1007		/* Set the new target id */
1008		req->target_id = cpu_to_le16(vf->fw_fid);
1009		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
1010		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
1011
1012		rc = hwrm_req_send(bp, req);
1013	}
1014	if (rc)
1015		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
1016	return rc;
1017}
1018
1019static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
1020{
1021	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
1022	struct hwrm_func_vf_cfg_input *req =
1023		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
1024
 1025	/* Allow the VF to set a valid MAC address if trust is on or
 1026	 * if the PF-assigned MAC address is zero.
 1027	 */
1028	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
1029		bool trust = bnxt_is_trusted_vf(bp, vf);
1030
1031		if (is_valid_ether_addr(req->dflt_mac_addr) &&
1032		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
1033		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
1034			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
1035			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1036		}
1037		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1038	}
1039	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1040}
1041
1042static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
1043{
1044	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
1045	struct hwrm_cfa_l2_filter_alloc_input *req =
1046		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
1047	bool mac_ok = false;
1048
1049	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
1050		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1051
 1052	/* Allow the VF to set a valid MAC address if trust is on.
 1053	 * Otherwise the address must first match the MAC address in the
 1054	 * PF's context, or, failing that, the VF's own MAC address if the
 1055	 * firmware spec is >= 1.2.2.
 1056	 */
1057	if (bnxt_is_trusted_vf(bp, vf)) {
1058		mac_ok = true;
1059	} else if (is_valid_ether_addr(vf->mac_addr)) {
1060		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
1061			mac_ok = true;
1062	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
1063		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
1064			mac_ok = true;
1065	} else {
 1066		/* There are two cases:
 1067		 * 1. If firmware spec < 0x10202, the VF MAC address is not
 1068		 *    forwarded to the PF and so it doesn't have to match.
 1069		 * 2. Allow the VF to modify its own MAC when the PF has not
 1070		 *    assigned a valid MAC address and firmware spec >= 0x10202.
 1071		 */
1072		mac_ok = true;
1073	}
1074	if (mac_ok)
1075		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1076	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1077}
1078
1079static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
1080{
1081	int rc = 0;
1082
1083	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
1084		/* real link */
1085		rc = bnxt_hwrm_exec_fwd_resp(
1086			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
1087	} else {
1088		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
1089		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
1090
1091		phy_qcfg_req =
1092		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
1093		mutex_lock(&bp->link_lock);
1094		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
1095		       sizeof(phy_qcfg_resp));
1096		mutex_unlock(&bp->link_lock);
1097		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
1098		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
1099		phy_qcfg_resp.valid = 1;
1100
1101		if (vf->flags & BNXT_VF_LINK_UP) {
1102			/* if physical link is down, force link up on VF */
1103			if (phy_qcfg_resp.link !=
1104			    PORT_PHY_QCFG_RESP_LINK_LINK) {
1105				phy_qcfg_resp.link =
1106					PORT_PHY_QCFG_RESP_LINK_LINK;
1107				phy_qcfg_resp.link_speed = cpu_to_le16(
1108					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
1109				phy_qcfg_resp.duplex_cfg =
1110					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
1111				phy_qcfg_resp.duplex_state =
1112					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
1113				phy_qcfg_resp.pause =
1114					(PORT_PHY_QCFG_RESP_PAUSE_TX |
1115					 PORT_PHY_QCFG_RESP_PAUSE_RX);
1116			}
1117		} else {
1118			/* force link down */
1119			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
1120			phy_qcfg_resp.link_speed = 0;
1121			phy_qcfg_resp.duplex_state =
1122				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
1123			phy_qcfg_resp.pause = 0;
1124		}
1125		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
1126					phy_qcfg_req->resp_addr,
1127					phy_qcfg_req->cmpl_ring,
1128					sizeof(phy_qcfg_resp));
1129	}
1130	return rc;
1131}
1132
1133static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
1134{
1135	int rc = 0;
1136	struct input *encap_req = vf->hwrm_cmd_req_addr;
1137	u32 req_type = le16_to_cpu(encap_req->req_type);
1138
1139	switch (req_type) {
1140	case HWRM_FUNC_VF_CFG:
1141		rc = bnxt_vf_configure_mac(bp, vf);
1142		break;
1143	case HWRM_CFA_L2_FILTER_ALLOC:
1144		rc = bnxt_vf_validate_set_mac(bp, vf);
1145		break;
1146	case HWRM_FUNC_CFG:
 1147		/* TODO: validate whether the VF is allowed to change its MAC
 1148		 * address, MTU, number of rings, etc.
 1149		 */
1150		rc = bnxt_hwrm_exec_fwd_resp(
1151			bp, vf, sizeof(struct hwrm_func_cfg_input));
1152		break;
1153	case HWRM_PORT_PHY_QCFG:
1154		rc = bnxt_vf_set_link(bp, vf);
1155		break;
1156	default:
1157		break;
1158	}
1159	return rc;
1160}
1161
1162void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1163{
1164	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
1165
 1166	/* Scan through VFs and process commands */
1167	while (1) {
1168		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
1169		if (vf_id >= active_vfs)
1170			break;
1171
1172		clear_bit(vf_id, bp->pf.vf_event_bmap);
1173		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
1174		i = vf_id + 1;
1175	}
1176}
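
The find_next_bit()/clear_bit() loop above is the manual form of the for_each_set_bit() idiom from <linux/bitops.h>; an equivalent sketch:

	unsigned long vf_id;

	for_each_set_bit(vf_id, bp->pf.vf_event_bmap, bp->pf.active_vfs) {
		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
	}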
1177
1178int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
1179{
1180	struct hwrm_func_vf_cfg_input *req;
1181	int rc = 0;
1182
1183	if (!BNXT_VF(bp))
1184		return 0;
1185
1186	if (bp->hwrm_spec_code < 0x10202) {
1187		if (is_valid_ether_addr(bp->vf.mac_addr))
1188			rc = -EADDRNOTAVAIL;
1189		goto mac_done;
1190	}
1191
1192	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
1193	if (rc)
1194		goto mac_done;
1195
1196	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1197	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
1198	if (!strict)
1199		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
1200	rc = hwrm_req_send(bp, req);
1201mac_done:
1202	if (rc && strict) {
1203		rc = -EADDRNOTAVAIL;
1204		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
1205			    mac);
1206		return rc;
1207	}
1208	return 0;
1209}
1210
1211void bnxt_update_vf_mac(struct bnxt *bp)
1212{
1213	struct hwrm_func_qcaps_output *resp;
1214	struct hwrm_func_qcaps_input *req;
1215	bool inform_pf = false;
1216
1217	if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
1218		return;
1219
1220	req->fid = cpu_to_le16(0xffff);
1221
1222	resp = hwrm_req_hold(bp, req);
1223	if (hwrm_req_send(bp, req))
1224		goto update_vf_mac_exit;
1225
1226	/* Store MAC address from the firmware.  There are 2 cases:
1227	 * 1. MAC address is valid.  It is assigned from the PF and we
1228	 *    need to override the current VF MAC address with it.
1229	 * 2. MAC address is zero.  The VF will use a random MAC address by
1230	 *    default but the stored zero MAC will allow the VF user to change
 1231	 *    the random MAC address using ndo_set_mac_address() if desired.
1232	 */
1233	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
1234		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
1235		/* This means we are now using our own MAC address, let
1236		 * the PF know about this MAC address.
1237		 */
1238		if (!is_valid_ether_addr(bp->vf.mac_addr))
1239			inform_pf = true;
1240	}
1241
1242	/* overwrite netdev dev_addr with admin VF MAC */
1243	if (is_valid_ether_addr(bp->vf.mac_addr))
1244		eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
1245update_vf_mac_exit:
1246	hwrm_req_drop(bp, req);
1247	if (inform_pf)
1248		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
1249}
1250
1251#else
1252
1253int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
1254{
1255	if (*num_vfs)
1256		return -EOPNOTSUPP;
1257	return 0;
1258}
1259
1260void bnxt_sriov_disable(struct bnxt *bp)
1261{
1262}
1263
1264void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1265{
 1266	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
1267}
1268
1269void bnxt_update_vf_mac(struct bnxt *bp)
1270{
1271}
1272
1273int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
1274{
1275	return 0;
1276}
1277#endif
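
The v5.9 listing that follows shows the same file before the HWRM request API rework: requests were caller-owned structs on the stack, initialized with bnxt_hwrm_cmd_hdr_init() and sent with hwrm_send_message() (or _hwrm_send_message() under the explicit bp->hwrm_cmd_lock mutex), whereas the v6.8 code above allocates and tracks requests through the bnxt_hwrm.h API, with hwrm_req_hold()/hwrm_req_drop() replacing the mutex around multi-send sequences. The two shapes, condensed from the two listings:

	/* v5.9 style: caller-owned struct, explicit header init, global lock */
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	req.vf_id = cpu_to_le16(vf_id);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	mutex_unlock(&bp->hwrm_cmd_lock);

	/* v6.8 style: request allocated and tracked by the HWRM layer */
	struct hwrm_func_vf_resc_free_input *req;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
	if (!rc) {
		hwrm_req_hold(bp, req);
		req->vf_id = cpu_to_le16(vf_id);
		rc = hwrm_req_send(bp, req);
		hwrm_req_drop(bp, req);
	}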
v5.9: drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2018 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/pci.h>
  13#include <linux/netdevice.h>
  14#include <linux/if_vlan.h>
  15#include <linux/interrupt.h>
  16#include <linux/etherdevice.h>
  17#include "bnxt_hsi.h"
  18#include "bnxt.h"
  19#include "bnxt_ulp.h"
  20#include "bnxt_sriov.h"
  21#include "bnxt_vfr.h"
  22#include "bnxt_ethtool.h"
  23
  24#ifdef CONFIG_BNXT_SRIOV
  25static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
  26					  struct bnxt_vf_info *vf, u16 event_id)
  27{
  28	struct hwrm_fwd_async_event_cmpl_input req = {0};
  29	struct hwrm_async_event_cmpl *async_cmpl;
  30	int rc = 0;
  31
  32	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
  33	if (vf)
  34		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
  35	else
  36		/* broadcast this async event to all VFs */
  37		req.encap_async_event_target_id = cpu_to_le16(0xffff);
  38	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
  39	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
  40	async_cmpl->event_id = cpu_to_le16(event_id);
  41
  42	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  43	if (rc)
  44		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
  45			   rc);
  46	return rc;
  47}
  48
  49static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
  50{
  51	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
   52		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
  53		return -EINVAL;
  54	}
  55	if (!bp->pf.active_vfs) {
   56		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
  57		return -EINVAL;
  58	}
  59	if (vf_id >= bp->pf.active_vfs) {
  60		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
  61		return -EINVAL;
  62	}
  63	return 0;
  64}
  65
  66int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
  67{
  68	struct hwrm_func_cfg_input req = {0};
  69	struct bnxt *bp = netdev_priv(dev);
  70	struct bnxt_vf_info *vf;
  71	bool old_setting = false;
  72	u32 func_flags;
  73	int rc;
  74
  75	if (bp->hwrm_spec_code < 0x10701)
  76		return -ENOTSUPP;
  77
  78	rc = bnxt_vf_ndo_prep(bp, vf_id);
  79	if (rc)
  80		return rc;
  81
  82	vf = &bp->pf.vf[vf_id];
  83	if (vf->flags & BNXT_VF_SPOOFCHK)
  84		old_setting = true;
  85	if (old_setting == setting)
  86		return 0;
  87
  88	if (setting)
  89		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
  90	else
  91		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
   92	/* TODO: if the driver supports VLAN filtering on the guest VLAN,
   93	 * the spoof check should also include VLAN anti-spoofing.
   94	 */
  95	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  96	req.fid = cpu_to_le16(vf->fw_fid);
  97	req.flags = cpu_to_le32(func_flags);
  98	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  99	if (!rc) {
 100		if (setting)
 101			vf->flags |= BNXT_VF_SPOOFCHK;
 102		else
 103			vf->flags &= ~BNXT_VF_SPOOFCHK;
 104	}
 105	return rc;
 106}
 107
 108static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
 109{
 110	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 111	struct hwrm_func_qcfg_input req = {0};
 112	int rc;
 113
 114	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
 115	req.fid = cpu_to_le16(vf->fw_fid);
 116	mutex_lock(&bp->hwrm_cmd_lock);
 117	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 118	if (rc) {
 119		mutex_unlock(&bp->hwrm_cmd_lock);
 120		return rc;
 121	}
 122	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
 123	mutex_unlock(&bp->hwrm_cmd_lock);
 124	return 0;
 125}
 126
 127static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 128{
 129	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 130		return !!(vf->flags & BNXT_VF_TRUST);
 131
 132	bnxt_hwrm_func_qcfg_flags(bp, vf);
 133	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
 134}
 135
 136static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 137{
 138	struct hwrm_func_cfg_input req = {0};
 139
 140	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 141		return 0;
 142
 143	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 144	req.fid = cpu_to_le16(vf->fw_fid);
 145	if (vf->flags & BNXT_VF_TRUST)
 146		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 147	else
 148		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
 149	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 150}
 151
 152int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
 153{
 154	struct bnxt *bp = netdev_priv(dev);
 155	struct bnxt_vf_info *vf;
 156
 157	if (bnxt_vf_ndo_prep(bp, vf_id))
 158		return -EINVAL;
 159
 160	vf = &bp->pf.vf[vf_id];
 161	if (trusted)
 162		vf->flags |= BNXT_VF_TRUST;
 163	else
 164		vf->flags &= ~BNXT_VF_TRUST;
 165
 166	bnxt_hwrm_set_trusted_vf(bp, vf);
 167	return 0;
 168}
 169
 170int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 171		       struct ifla_vf_info *ivi)
 172{
 173	struct bnxt *bp = netdev_priv(dev);
 174	struct bnxt_vf_info *vf;
 175	int rc;
 176
 177	rc = bnxt_vf_ndo_prep(bp, vf_id);
 178	if (rc)
 179		return rc;
 180
 181	ivi->vf = vf_id;
 182	vf = &bp->pf.vf[vf_id];
 183
 184	if (is_valid_ether_addr(vf->mac_addr))
 185		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
 186	else
 187		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
 188	ivi->max_tx_rate = vf->max_tx_rate;
 189	ivi->min_tx_rate = vf->min_tx_rate;
 190	ivi->vlan = vf->vlan;
 191	if (vf->flags & BNXT_VF_QOS)
 192		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
 193	else
 194		ivi->qos = 0;
 195	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
 196	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
 197	if (!(vf->flags & BNXT_VF_LINK_FORCED))
 198		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 199	else if (vf->flags & BNXT_VF_LINK_UP)
 200		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
 201	else
 202		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
 203
 204	return 0;
 205}
 206
 207int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 208{
 209	struct hwrm_func_cfg_input req = {0};
 210	struct bnxt *bp = netdev_priv(dev);
 211	struct bnxt_vf_info *vf;
 212	int rc;
 213
 214	rc = bnxt_vf_ndo_prep(bp, vf_id);
 215	if (rc)
 216		return rc;
  217	/* Reject broadcast or multicast MAC addresses; a zero MAC address
  218	 * means the VF is allowed to use its own MAC address.
  219	 */
 220	if (is_multicast_ether_addr(mac)) {
 221		netdev_err(dev, "Invalid VF ethernet address\n");
 222		return -EINVAL;
 223	}
 224	vf = &bp->pf.vf[vf_id];
 225
 226	memcpy(vf->mac_addr, mac, ETH_ALEN);
 227	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 228	req.fid = cpu_to_le16(vf->fw_fid);
 229	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 230	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
 231	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 232}
 233
 234int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 235		     __be16 vlan_proto)
 236{
 237	struct hwrm_func_cfg_input req = {0};
 238	struct bnxt *bp = netdev_priv(dev);
 239	struct bnxt_vf_info *vf;
 240	u16 vlan_tag;
 241	int rc;
 242
 243	if (bp->hwrm_spec_code < 0x10201)
 244		return -ENOTSUPP;
 245
 246	if (vlan_proto != htons(ETH_P_8021Q))
 247		return -EPROTONOSUPPORT;
 248
 249	rc = bnxt_vf_ndo_prep(bp, vf_id);
 250	if (rc)
 251		return rc;
 252
  253	/* TODO: proper handling of user priority still needs to be implemented;
  254	 * for now, fail the command if a non-zero priority is given.
  255	 */
 256	if (vlan_id > 4095 || qos)
 257		return -EINVAL;
 258
 259	vf = &bp->pf.vf[vf_id];
 260	vlan_tag = vlan_id;
 261	if (vlan_tag == vf->vlan)
 262		return 0;
 263
 264	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 265	req.fid = cpu_to_le16(vf->fw_fid);
 266	req.dflt_vlan = cpu_to_le16(vlan_tag);
 267	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 268	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 269	if (!rc)
 270		vf->vlan = vlan_tag;
 271	return rc;
 272}
 273
 274int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 275		   int max_tx_rate)
 276{
 277	struct hwrm_func_cfg_input req = {0};
 278	struct bnxt *bp = netdev_priv(dev);
 279	struct bnxt_vf_info *vf;
 280	u32 pf_link_speed;
 281	int rc;
 282
 283	rc = bnxt_vf_ndo_prep(bp, vf_id);
 284	if (rc)
 285		return rc;
 286
 287	vf = &bp->pf.vf[vf_id];
 288	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
 289	if (max_tx_rate > pf_link_speed) {
  290		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
 291			    max_tx_rate, vf_id);
 292		return -EINVAL;
 293	}
 294
 295	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
 296		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
 297			    min_tx_rate, vf_id);
 298		return -EINVAL;
 299	}
 300	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
 301		return 0;
 302	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 303	req.fid = cpu_to_le16(vf->fw_fid);
 304	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 305	req.max_bw = cpu_to_le32(max_tx_rate);
 306	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
 307	req.min_bw = cpu_to_le32(min_tx_rate);
 308	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 309	if (!rc) {
 310		vf->min_tx_rate = min_tx_rate;
 311		vf->max_tx_rate = max_tx_rate;
 312	}
 313	return rc;
 314}
 315
 316int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
 317{
 318	struct bnxt *bp = netdev_priv(dev);
 319	struct bnxt_vf_info *vf;
 320	int rc;
 321
 322	rc = bnxt_vf_ndo_prep(bp, vf_id);
 323	if (rc)
 324		return rc;
 325
 326	vf = &bp->pf.vf[vf_id];
 327
 328	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
 329	switch (link) {
 330	case IFLA_VF_LINK_STATE_AUTO:
 331		vf->flags |= BNXT_VF_LINK_UP;
 332		break;
 333	case IFLA_VF_LINK_STATE_DISABLE:
 334		vf->flags |= BNXT_VF_LINK_FORCED;
 335		break;
 336	case IFLA_VF_LINK_STATE_ENABLE:
 337		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
 338		break;
 339	default:
 340		netdev_err(bp->dev, "Invalid link option\n");
 341		rc = -EINVAL;
 342		break;
 343	}
 344	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
 345		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
 346			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
 347	return rc;
 348}
 349
 350static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
 351{
 352	int i;
 353	struct bnxt_vf_info *vf;
 354
 355	for (i = 0; i < num_vfs; i++) {
 356		vf = &bp->pf.vf[i];
 357		memset(vf, 0, sizeof(*vf));
 358	}
 359	return 0;
 360}
 361
 362static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
 363{
 364	int i, rc = 0;
 365	struct bnxt_pf_info *pf = &bp->pf;
 366	struct hwrm_func_vf_resc_free_input req = {0};
 367
 368	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
 369
 370	mutex_lock(&bp->hwrm_cmd_lock);
 371	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
 372		req.vf_id = cpu_to_le16(i);
 373		rc = _hwrm_send_message(bp, &req, sizeof(req),
 374					HWRM_CMD_TIMEOUT);
 375		if (rc)
 376			break;
 377	}
 378	mutex_unlock(&bp->hwrm_cmd_lock);
 379	return rc;
 380}
 381
 382static void bnxt_free_vf_resources(struct bnxt *bp)
 383{
 384	struct pci_dev *pdev = bp->pdev;
 385	int i;
 386
 387	kfree(bp->pf.vf_event_bmap);
 388	bp->pf.vf_event_bmap = NULL;
 389
 390	for (i = 0; i < 4; i++) {
 391		if (bp->pf.hwrm_cmd_req_addr[i]) {
 392			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
 393					  bp->pf.hwrm_cmd_req_addr[i],
 394					  bp->pf.hwrm_cmd_req_dma_addr[i]);
 395			bp->pf.hwrm_cmd_req_addr[i] = NULL;
 396		}
 397	}
 398
 399	bp->pf.active_vfs = 0;
 400	kfree(bp->pf.vf);
 401	bp->pf.vf = NULL;
 402}
 403
 404static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
 405{
 406	struct pci_dev *pdev = bp->pdev;
 407	u32 nr_pages, size, i, j, k = 0;
 408
 409	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
 410	if (!bp->pf.vf)
 411		return -ENOMEM;
 412
 413	bnxt_set_vf_attr(bp, num_vfs);
 414
 415	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
 416	nr_pages = size / BNXT_PAGE_SIZE;
 417	if (size & (BNXT_PAGE_SIZE - 1))
 418		nr_pages++;
 419
 420	for (i = 0; i < nr_pages; i++) {
 421		bp->pf.hwrm_cmd_req_addr[i] =
 422			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
 423					   &bp->pf.hwrm_cmd_req_dma_addr[i],
 424					   GFP_KERNEL);
 425
 426		if (!bp->pf.hwrm_cmd_req_addr[i])
 427			return -ENOMEM;
 428
 429		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
 430			struct bnxt_vf_info *vf = &bp->pf.vf[k];
 431
 432			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
 433						j * BNXT_HWRM_REQ_MAX_SIZE;
 434			vf->hwrm_cmd_req_dma_addr =
 435				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
 436				BNXT_HWRM_REQ_MAX_SIZE;
 437			k++;
 438		}
 439	}
 440
  441	/* Max 128 VFs: the 16-byte bitmap holds 16 * 8 = 128 bits */
 442	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
 443	if (!bp->pf.vf_event_bmap)
 444		return -ENOMEM;
 445
 446	bp->pf.hwrm_cmd_req_pages = nr_pages;
 447	return 0;
 448}
 449
 450static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 451{
 452	struct hwrm_func_buf_rgtr_input req = {0};
 453
 454	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
 455
 456	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
 457	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
 458	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
 459	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
 460	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
 461	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
 462	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
 463
 464	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 465}
 466
  467/* Caller holds the bp->hwrm_cmd_lock mutex */
 468static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
 469{
 470	struct hwrm_func_cfg_input req = {0};
 471	struct bnxt_vf_info *vf;
 472
 473	vf = &bp->pf.vf[vf_id];
 474	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 475	req.fid = cpu_to_le16(vf->fw_fid);
 476
 477	if (is_valid_ether_addr(vf->mac_addr)) {
 478		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 479		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
 480	}
 481	if (vf->vlan) {
 482		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 483		req.dflt_vlan = cpu_to_le16(vf->vlan);
 484	}
 485	if (vf->max_tx_rate) {
 486		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 487		req.max_bw = cpu_to_le32(vf->max_tx_rate);
 488#ifdef HAVE_IFLA_TX_RATE
 489		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
 490		req.min_bw = cpu_to_le32(vf->min_tx_rate);
 491#endif
 492	}
 493	if (vf->flags & BNXT_VF_TRUST)
 494		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 495
 496	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 497}
 498
 499/* Only called by PF to reserve resources for VFs, returns actual number of
 500 * VFs configured, or < 0 on error.
 501 */
 502static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 503{
 504	struct hwrm_func_vf_resource_cfg_input req = {0};
 505	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 506	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
 507	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
 508	struct bnxt_pf_info *pf = &bp->pf;
 509	int i, rc = 0, min = 1;
 510	u16 vf_msix = 0;
 511	u16 vf_rss;
 512
 513	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
 514
 515	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 516		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
 517		vf_ring_grps = 0;
 518	} else {
 519		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
 520	}
 521	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
 522	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
 523	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 524		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
 525	else
 526		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
 527	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
 528	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
 529	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 530	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
 531
 532	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
 533	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 534		min = 0;
 535		req.min_rsscos_ctx = cpu_to_le16(min);
 536	}
 537	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
 538	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 539		req.min_cmpl_rings = cpu_to_le16(min);
 540		req.min_tx_rings = cpu_to_le16(min);
 541		req.min_rx_rings = cpu_to_le16(min);
 542		req.min_l2_ctxs = cpu_to_le16(min);
 543		req.min_vnics = cpu_to_le16(min);
 544		req.min_stat_ctx = cpu_to_le16(min);
 545		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
 546			req.min_hw_ring_grps = cpu_to_le16(min);
 547	} else {
 548		vf_cp_rings /= num_vfs;
 549		vf_tx_rings /= num_vfs;
 550		vf_rx_rings /= num_vfs;
 551		vf_vnics /= num_vfs;
 552		vf_stat_ctx /= num_vfs;
 553		vf_ring_grps /= num_vfs;
 554		vf_rss /= num_vfs;
 555
 556		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
 557		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
 558		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
 559		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
 560		req.min_vnics = cpu_to_le16(vf_vnics);
 561		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
 562		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 563		req.min_rsscos_ctx = cpu_to_le16(vf_rss);
 564	}
 565	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
 566	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
 567	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
 568	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
 569	req.max_vnics = cpu_to_le16(vf_vnics);
 570	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
 571	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 572	req.max_rsscos_ctx = cpu_to_le16(vf_rss);
 573	if (bp->flags & BNXT_FLAG_CHIP_P5)
 574		req.max_msix = cpu_to_le16(vf_msix / num_vfs);
 575
 576	mutex_lock(&bp->hwrm_cmd_lock);
 577	for (i = 0; i < num_vfs; i++) {
 578		if (reset)
 579			__bnxt_set_vf_params(bp, i);
 580
 581		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
 582		rc = _hwrm_send_message(bp, &req, sizeof(req),
 583					HWRM_CMD_TIMEOUT);
 584		if (rc)
 585			break;
 586		pf->active_vfs = i + 1;
 587		pf->vf[i].fw_fid = pf->first_vf_id + i;
 588	}
 589	mutex_unlock(&bp->hwrm_cmd_lock);
 590	if (pf->active_vfs) {
 591		u16 n = pf->active_vfs;
 592
 593		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
 594		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
 595		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
 596					     n;
 597		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
 598		hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
 599		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
 600		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
 601		if (bp->flags & BNXT_FLAG_CHIP_P5)
 602			hw_resc->max_irqs -= vf_msix * n;
 603
 604		rc = pf->active_vfs;
 605	}
 606	return rc;
 607}
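/* Worked example with illustrative numbers: if max_tx_rings = 128,
 * tx_nr_rings = 8 and num_vfs = 4 under the default (non-minimal)
 * strategy, each VF is offered min_tx_rings = max_tx_rings =
 * (128 - 8) / 4 = 30, and a successful reservation then shrinks
 * hw_resc->max_tx_rings by 30 * active_vfs.
 */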
 608
 609/* Only called by PF to reserve resources for VFs, returns actual number of
 610 * VFs configured, or < 0 on error.
 611 */
 612static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 613{
  614	int rc = 0, mtu, i;
 615	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
 616	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 617	struct hwrm_func_cfg_input req = {0};
 618	struct bnxt_pf_info *pf = &bp->pf;
 619	int total_vf_tx_rings = 0;
 620	u16 vf_ring_grps;
 621
 622	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 623
  624	/* Remaining rings are distributed equally amongst VFs for now */
 625	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
 626	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
 627	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 628		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
 629			      num_vfs;
 630	else
 631		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
 632			      num_vfs;
 633	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
 634	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
 635	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 636	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 637
 638	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
 639				  FUNC_CFG_REQ_ENABLES_MRU |
 640				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
 641				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
 642				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
 643				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
 644				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
 645				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
 646				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
 647				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
 648
 649	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
 650	req.mru = cpu_to_le16(mtu);
 651	req.mtu = cpu_to_le16(mtu);
 652
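	/* e.g. with the default dev->mtu of 1500, mtu/mru above are
	 * programmed as 1500 + ETH_HLEN (14) + VLAN_HLEN (4) = 1518 bytes.
	 */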
 653	req.num_rsscos_ctxs = cpu_to_le16(1);
 654	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
 655	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
 656	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
 657	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 658	req.num_l2_ctxs = cpu_to_le16(4);
 659
 660	req.num_vnics = cpu_to_le16(vf_vnics);
 661	/* FIXME spec currently uses 1 bit for stats ctx */
 662	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
 663
 664	mutex_lock(&bp->hwrm_cmd_lock);
 665	for (i = 0; i < num_vfs; i++) {
 666		int vf_tx_rsvd = vf_tx_rings;
 667
 668		req.fid = cpu_to_le16(pf->first_vf_id + i);
 669		rc = _hwrm_send_message(bp, &req, sizeof(req),
 670					HWRM_CMD_TIMEOUT);
 671		if (rc)
 672			break;
 673		pf->active_vfs = i + 1;
 674		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
 675		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
 676					      &vf_tx_rsvd);
 677		if (rc)
 678			break;
 679		total_vf_tx_rings += vf_tx_rsvd;
 680	}
 681	mutex_unlock(&bp->hwrm_cmd_lock);
 682	if (pf->active_vfs) {
 683		hw_resc->max_tx_rings -= total_vf_tx_rings;
 684		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
 685		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
 686		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
 687		hw_resc->max_rsscos_ctxs -= num_vfs;
 688		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
 689		hw_resc->max_vnics -= vf_vnics * num_vfs;
 690		rc = pf->active_vfs;
 691	}
 692	return rc;
 693}
 694
 695static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
 696{
 697	if (BNXT_NEW_RM(bp))
 698		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
 699	else
 700		return bnxt_hwrm_func_cfg(bp, num_vfs);
 701}
 702
 703int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
 704{
 705	int rc;
 706
 707	/* Register buffers for VFs */
 708	rc = bnxt_hwrm_func_buf_rgtr(bp);
 709	if (rc)
 710		return rc;
 711
 712	/* Reserve resources for VFs */
 713	rc = bnxt_func_cfg(bp, *num_vfs, reset);
 714	if (rc != *num_vfs) {
 715		if (rc <= 0) {
 716			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
 717			*num_vfs = 0;
 718			return rc;
 719		}
 720		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
 721			    rc);
 722		*num_vfs = rc;
 723	}
 724
 725	bnxt_ulp_sriov_cfg(bp, *num_vfs);
 726	return 0;
 727}
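/* Example of the partial-reservation contract above (illustrative
 * numbers): if 8 VFs are requested but bnxt_func_cfg() can only reserve
 * resources for 6, *num_vfs is rewritten to 6 and 0 is returned, so
 * callers must re-read *num_vfs rather than trust their original
 * request.
 */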
 728
 729static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 730{
 731	int rc = 0, vfs_supported;
 732	int min_rx_rings, min_tx_rings, min_rss_ctxs;
 733	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 734	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
 735	int avail_cp, avail_stat;
 736
  737	/* Check if we can enable the requested number of VFs. At a minimum
  738	 * we require 1 RX and 1 TX ring for each VF. In this minimum config,
  739	 * features like TPA will not be available.
  740	 */
 741	vfs_supported = *num_vfs;
 742
 743	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
 744	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
 745	avail_cp = min_t(int, avail_cp, avail_stat);
 746
 747	while (vfs_supported) {
 748		min_rx_rings = vfs_supported;
 749		min_tx_rings = vfs_supported;
 750		min_rss_ctxs = vfs_supported;
 751
 752		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
 753			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
 754			    min_rx_rings)
 755				rx_ok = 1;
 756		} else {
 757			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
 758			    min_rx_rings)
 759				rx_ok = 1;
 760		}
 761		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
 762		    avail_cp < min_rx_rings)
 763			rx_ok = 0;
 764
 765		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
 766		    avail_cp >= min_tx_rings)
 767			tx_ok = 1;
 768
 769		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
 770		    min_rss_ctxs)
 771			rss_ok = 1;
 772
 773		if (tx_ok && rx_ok && rss_ok)
 774			break;
 775
 776		vfs_supported--;
 777	}
 778
 779	if (!vfs_supported) {
  780		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
 781		return -EINVAL;
 782	}
 783
 784	if (vfs_supported != *num_vfs) {
 785		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
 786			    *num_vfs, vfs_supported);
 787		*num_vfs = vfs_supported;
 788	}
 789
 790	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
 791	if (rc)
 792		goto err_out1;
 793
 794	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
 795	if (rc)
 796		goto err_out2;
 797
 798	rc = pci_enable_sriov(bp->pdev, *num_vfs);
 799	if (rc)
 800		goto err_out2;
 801
 802	return 0;
 803
 804err_out2:
  805	/* Free the resources reserved for various VFs */
 806	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
 807
 808err_out1:
 809	bnxt_free_vf_resources(bp);
 810
 811	return rc;
 812}
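/* Illustrative walk-through of the step-down loop above: requesting
 * 8 VFs when only 6 RX rings remain beyond the PF's share leaves rx_ok
 * at 0 for vfs_supported = 8 and 7; at 6 all three checks can pass, so
 * 6 VFs are enabled and "Requested VFs 8, can enable 6" is logged
 * before any resources are allocated.
 */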
 813
 814void bnxt_sriov_disable(struct bnxt *bp)
 815{
 816	u16 num_vfs = pci_num_vf(bp->pdev);
 817
 818	if (!num_vfs)
 819		return;
 820
 821	/* synchronize VF and VF-rep create and destroy */
 822	mutex_lock(&bp->sriov_lock);
 823	bnxt_vf_reps_destroy(bp);
 824
 825	if (pci_vfs_assigned(bp->pdev)) {
 826		bnxt_hwrm_fwd_async_event_cmpl(
 827			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
 828		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
 829			    num_vfs);
 830	} else {
 831		pci_disable_sriov(bp->pdev);
  832		/* Free the HW resources reserved for various VFs */
 833		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
 834	}
 835	mutex_unlock(&bp->sriov_lock);
 836
 837	bnxt_free_vf_resources(bp);
 838
 839	/* Reclaim all resources for the PF. */
 840	rtnl_lock();
 841	bnxt_restore_pf_fw_resources(bp);
 842	rtnl_unlock();
 843
 844	bnxt_ulp_sriov_cfg(bp, 0);
 845}
 846
 847int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
 848{
 849	struct net_device *dev = pci_get_drvdata(pdev);
 850	struct bnxt *bp = netdev_priv(dev);
 851
 852	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
  853		netdev_warn(dev, "SRIOV is not allowed when the irq mode is not MSIX\n");
 854		return 0;
 855	}
 856
 857	rtnl_lock();
 858	if (!netif_running(dev)) {
  859		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down\n");
 860		rtnl_unlock();
 861		return 0;
 862	}
 863	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
  864		netdev_warn(dev, "Rejecting SRIOV config request while FW reset is in progress\n");
 865		rtnl_unlock();
 866		return 0;
 867	}
 868	bp->sriov_cfg = true;
 869	rtnl_unlock();
 870
 871	if (pci_vfs_assigned(bp->pdev)) {
 872		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
 873		num_vfs = 0;
 874		goto sriov_cfg_exit;
 875	}
 876
  877	/* Check if the number of enabled VFs matches the request */
 878	if (num_vfs && num_vfs == bp->pf.active_vfs)
 879		goto sriov_cfg_exit;
 880
  881	/* if there are pre-existing VFs, clean them up */
 882	bnxt_sriov_disable(bp);
 883	if (!num_vfs)
 884		goto sriov_cfg_exit;
 885
 886	bnxt_sriov_enable(bp, &num_vfs);
 887
 888sriov_cfg_exit:
 889	bp->sriov_cfg = false;
 890	wake_up(&bp->sriov_cfg_wait);
 891
 892	return num_vfs;
 893}
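/* This is the driver's PCI sriov_configure hook, normally reached via
 * the standard sysfs knob, e.g. (hypothetical BDF shown):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * The return value is the number of VFs actually enabled, which may be
 * lower than the number requested.
 */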
 894
 895static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 896			      void *encap_resp, __le64 encap_resp_addr,
 897			      __le16 encap_resp_cpr, u32 msg_size)
 898{
 899	int rc = 0;
 900	struct hwrm_fwd_resp_input req = {0};
 901
 902	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
 903		return -EINVAL;
 904
 905	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
 906
 907	/* Set the new target id */
 908	req.target_id = cpu_to_le16(vf->fw_fid);
 909	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 910	req.encap_resp_len = cpu_to_le16(msg_size);
 911	req.encap_resp_addr = encap_resp_addr;
 912	req.encap_resp_cmpl_ring = encap_resp_cpr;
 913	memcpy(req.encap_resp, encap_resp, msg_size);
 914
 915	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 916	if (rc)
 917		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
 918	return rc;
 919}
 920
 921static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 922				  u32 msg_size)
 923{
 924	int rc = 0;
 925	struct hwrm_reject_fwd_resp_input req = {0};
 926
 927	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
 928		return -EINVAL;
 929
 930	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
 931	/* Set the new target id */
 932	req.target_id = cpu_to_le16(vf->fw_fid);
 933	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 934	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
 935
 936	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 937	if (rc)
 938		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
 939	return rc;
 940}
 941
 942static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 943				   u32 msg_size)
 944{
 945	int rc = 0;
 946	struct hwrm_exec_fwd_resp_input req = {0};
 947
 948	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
 949		return -EINVAL;
 950
 951	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
 952	/* Set the new target id */
 953	req.target_id = cpu_to_le16(vf->fw_fid);
 954	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 955	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
 956
 957	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 958	if (rc)
  959		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
 960	return rc;
 961}
 962
 963static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 964{
 965	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
 966	struct hwrm_func_vf_cfg_input *req =
 967		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
 968
  969	/* Allow the VF to set a valid MAC address if trust is on or
  970	 * if the PF-assigned MAC address is zero.
  971	 */
 972	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
 973		bool trust = bnxt_is_trusted_vf(bp, vf);
 974
 975		if (is_valid_ether_addr(req->dflt_mac_addr) &&
 976		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
 977		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
 978			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
 979			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
 980		}
 981		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
 982	}
 983	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
 984}
 985
 986static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 987{
 988	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
 989	struct hwrm_cfa_l2_filter_alloc_input *req =
 990		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
 991	bool mac_ok = false;
 992
 993	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
 994		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
 995
  996	/* Allow the VF to set a valid MAC address if trust is on.
  997	 * Otherwise, the requested MAC address must match the MAC address
  998	 * in the PF's context, or the VF's own MAC address if firmware
  999	 * spec >= 1.2.2.
 1000	 */
1001	if (bnxt_is_trusted_vf(bp, vf)) {
1002		mac_ok = true;
1003	} else if (is_valid_ether_addr(vf->mac_addr)) {
1004		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
1005			mac_ok = true;
1006	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
1007		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
1008			mac_ok = true;
1009	} else {
 1010		/* There are two cases:
 1011		 * 1. If firmware spec < 0x10202, the VF MAC address is not
 1012		 *    forwarded to the PF and so it doesn't have to match.
 1013		 * 2. Allow the VF to modify its own MAC when the PF has not
 1014		 *    assigned a valid MAC address and firmware spec >= 0x10202.
 1015		 */
1016		mac_ok = true;
1017	}
1018	if (mac_ok)
1019		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1020	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1021}
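/* Example of the checks above (illustrative addresses): if the PF
 * assigned 02:11:22:33:44:55 to an untrusted VF, an
 * HWRM_CFA_L2_FILTER_ALLOC request for exactly that address is
 * forwarded to the firmware, while a request for any other address is
 * bounced back through bnxt_hwrm_fwd_err_resp().
 */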
1022
1023static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
1024{
1025	int rc = 0;
1026
1027	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
1028		/* real link */
1029		rc = bnxt_hwrm_exec_fwd_resp(
1030			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
1031	} else {
1032		struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
1033		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
1034
1035		phy_qcfg_req =
1036		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
1037		mutex_lock(&bp->hwrm_cmd_lock);
1038		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
1039		       sizeof(phy_qcfg_resp));
1040		mutex_unlock(&bp->hwrm_cmd_lock);
1041		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
1042		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
1043		phy_qcfg_resp.valid = 1;
1044
1045		if (vf->flags & BNXT_VF_LINK_UP) {
1046			/* if physical link is down, force link up on VF */
1047			if (phy_qcfg_resp.link !=
1048			    PORT_PHY_QCFG_RESP_LINK_LINK) {
1049				phy_qcfg_resp.link =
1050					PORT_PHY_QCFG_RESP_LINK_LINK;
1051				phy_qcfg_resp.link_speed = cpu_to_le16(
1052					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
1053				phy_qcfg_resp.duplex_cfg =
1054					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
1055				phy_qcfg_resp.duplex_state =
1056					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
1057				phy_qcfg_resp.pause =
1058					(PORT_PHY_QCFG_RESP_PAUSE_TX |
1059					 PORT_PHY_QCFG_RESP_PAUSE_RX);
1060			}
1061		} else {
1062			/* force link down */
1063			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
1064			phy_qcfg_resp.link_speed = 0;
1065			phy_qcfg_resp.duplex_state =
1066				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
1067			phy_qcfg_resp.pause = 0;
1068		}
1069		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
1070					phy_qcfg_req->resp_addr,
1071					phy_qcfg_req->cmpl_ring,
1072					sizeof(phy_qcfg_resp));
1073	}
1074	return rc;
1075}
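/* Sketch of the intended admin flow (iproute2 syntax shown for
 * illustration; the flag-setting ndo is not in this excerpt): after
 * "ip link set <pfdev> vf 0 state enable", the BNXT_VF_LINK_FORCED and
 * BNXT_VF_LINK_UP flags make this function answer the VF's
 * PORT_PHY_QCFG with a synthetic 10Gb full-duplex link-up response
 * even while the physical port is down.
 */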
1076
1077static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
1078{
1079	int rc = 0;
1080	struct input *encap_req = vf->hwrm_cmd_req_addr;
1081	u32 req_type = le16_to_cpu(encap_req->req_type);
1082
1083	switch (req_type) {
1084	case HWRM_FUNC_VF_CFG:
1085		rc = bnxt_vf_configure_mac(bp, vf);
1086		break;
1087	case HWRM_CFA_L2_FILTER_ALLOC:
1088		rc = bnxt_vf_validate_set_mac(bp, vf);
1089		break;
1090	case HWRM_FUNC_CFG:
1091		/* TODO Validate if VF is allowed to change mac address,
1092		 * mtu, num of rings etc
1093		 */
1094		rc = bnxt_hwrm_exec_fwd_resp(
1095			bp, vf, sizeof(struct hwrm_func_cfg_input));
1096		break;
1097	case HWRM_PORT_PHY_QCFG:
1098		rc = bnxt_vf_set_link(bp, vf);
1099		break;
1100	default:
1101		break;
1102	}
1103	return rc;
1104}
1105
1106void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1107{
1108	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
1109
 1110	/* Scan through VFs and process commands */
1111	while (1) {
1112		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
1113		if (vf_id >= active_vfs)
1114			break;
1115
1116		clear_bit(vf_id, bp->pf.vf_event_bmap);
1117		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
1118		i = vf_id + 1;
1119	}
1120}
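/* Example with illustrative values: with active_vfs = 4 and bits 1 and
 * 3 set in vf_event_bmap, the loop services VF 1, then VF 3, then
 * stops once find_next_bit() returns active_vfs (no bits remaining).
 */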
1121
1122void bnxt_update_vf_mac(struct bnxt *bp)
1123{
1124	struct hwrm_func_qcaps_input req = {0};
1125	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1126
1127	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
1128	req.fid = cpu_to_le16(0xffff);
1129
1130	mutex_lock(&bp->hwrm_cmd_lock);
1131	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
1132		goto update_vf_mac_exit;
1133
 1134	/* Store MAC address from the firmware.  There are 2 cases:
 1135	 * 1. MAC address is valid.  It is assigned from the PF and we
 1136	 *    need to override the current VF MAC address with it.
 1137	 * 2. MAC address is zero.  The VF will use a random MAC address by
 1138	 *    default but the stored zero MAC will allow the VF user to change
 1139	 *    the random MAC address using ndo_set_mac_address() if desired.
 1140	 */
1141	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
1142		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
1143
1144	/* overwrite netdev dev_addr with admin VF MAC */
1145	if (is_valid_ether_addr(bp->vf.mac_addr))
1146		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
1147update_vf_mac_exit:
1148	mutex_unlock(&bp->hwrm_cmd_lock);
1149}
1150
1151int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1152{
1153	struct hwrm_func_vf_cfg_input req = {0};
1154	int rc = 0;
1155
1156	if (!BNXT_VF(bp))
1157		return 0;
1158
1159	if (bp->hwrm_spec_code < 0x10202) {
1160		if (is_valid_ether_addr(bp->vf.mac_addr))
1161			rc = -EADDRNOTAVAIL;
1162		goto mac_done;
1163	}
1164	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
1165	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1166	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
1167	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1168mac_done:
1169	if (rc && strict) {
1170		rc = -EADDRNOTAVAIL;
1171		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
1172			    mac);
1173		return rc;
1174	}
1175	return 0;
1176}
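/* Sketch of the calling context (an assumption based on the ndo MAC
 * path, which is not shown in this file): when a VF user changes the
 * MAC address, e.g. "ip link set dev <vfdev> address
 * 02:aa:bb:cc:dd:ee", the VF driver calls this to seek PF approval;
 * with strict set, a rejection surfaces as -EADDRNOTAVAIL, otherwise
 * the failure is ignored and 0 is returned.
 */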
1177#else
1178
1179int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
1180{
1181	if (*num_vfs)
1182		return -EOPNOTSUPP;
1183	return 0;
1184}
1185
1186void bnxt_sriov_disable(struct bnxt *bp)
1187{
1188}
1189
1190void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1191{
 1192	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
1193}
1194
1195void bnxt_update_vf_mac(struct bnxt *bp)
1196{
1197}
1198
1199int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1200{
1201	return 0;
1202}
1203#endif