   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_lib.h"
   6
   7/**
   8 * ice_err_to_virt_err - translate errors for VF return code
   9 * @ice_err: error return code
  10 */
  11static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
  12{
  13	switch (ice_err) {
  14	case ICE_SUCCESS:
  15		return VIRTCHNL_STATUS_SUCCESS;
  16	case ICE_ERR_BAD_PTR:
  17	case ICE_ERR_INVAL_SIZE:
  18	case ICE_ERR_DEVICE_NOT_SUPPORTED:
  19	case ICE_ERR_PARAM:
  20	case ICE_ERR_CFG:
  21		return VIRTCHNL_STATUS_ERR_PARAM;
  22	case ICE_ERR_NO_MEMORY:
  23		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
  24	case ICE_ERR_NOT_READY:
  25	case ICE_ERR_RESET_FAILED:
  26	case ICE_ERR_FW_API_VER:
  27	case ICE_ERR_AQ_ERROR:
  28	case ICE_ERR_AQ_TIMEOUT:
  29	case ICE_ERR_AQ_FULL:
  30	case ICE_ERR_AQ_NO_WORK:
  31	case ICE_ERR_AQ_EMPTY:
  32		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
  33	default:
  34		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
  35	}
  36}
  37
  38/**
  39 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
  40 * @pf: pointer to the PF structure
  41 * @v_opcode: operation code
  42 * @v_retval: return value
  43 * @msg: pointer to the msg buffer
  44 * @msglen: msg length
  45 */
  46static void
  47ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
  48		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
  49{
  50	struct ice_hw *hw = &pf->hw;
  51	struct ice_vf *vf = pf->vf;
  52	int i;
  53
  54	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
   55		/* Not all VFs are enabled so skip the ones that are not */
  56		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
  57		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
  58			continue;
  59
  60		/* Ignore return value on purpose - a given VF may fail, but
  61		 * we need to keep going and send to all of them
  62		 */
  63		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
  64				      msglen, NULL);
  65	}
  66}
  67
  68/**
  69 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
  70 * @vf: pointer to the VF structure
  71 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
  72 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
  73 * @link_up: whether or not to set the link up/down
  74 */
  75static void
  76ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
  77		 int ice_link_speed, bool link_up)
  78{
  79	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
  80		pfe->event_data.link_event_adv.link_status = link_up;
  81		/* Speed in Mbps */
  82		pfe->event_data.link_event_adv.link_speed =
  83			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
  84	} else {
  85		pfe->event_data.link_event.link_status = link_up;
  86		/* Legacy method for virtchnl link speeds */
  87		pfe->event_data.link_event.link_speed =
  88			(enum virtchnl_link_speed)
  89			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
  90	}
  91}
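/* Illustrative note (not in the original source): with the
 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED capability the speed is reported as a raw
 * Mbps value (e.g. 25 Gbps becomes 25000), while legacy VFs instead get one
 * of the fixed virtchnl_link_speed enum values.
 */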
  92
  93/**
  94 * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
  95 * @vf: pointer to the VF structure
  96 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
  97 * @link_up: whether or not to set the link up/down
  98 */
  99static void
 100ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
 101			bool link_up)
 102{
 103	u16 link_speed;
 104
 105	if (link_up)
 106		link_speed = ICE_AQ_LINK_SPEED_100GB;
 107	else
 108		link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
 109
 110	ice_set_pfe_link(vf, pfe, link_speed, link_up);
 111}
 112
 113/**
 114 * ice_vc_notify_vf_link_state - Inform a VF of link status
 115 * @vf: pointer to the VF structure
 116 *
 117 * send a link status message to a single VF
 118 */
 119static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
 120{
 121	struct virtchnl_pf_event pfe = { 0 };
 122	struct ice_link_status *ls;
 123	struct ice_pf *pf = vf->pf;
 124	struct ice_hw *hw;
 125
 126	hw = &pf->hw;
 127	ls = &hw->port_info->phy.link_info;
 128
 129	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 130	pfe.severity = PF_EVENT_SEVERITY_INFO;
 131
 132	/* Always report link is down if the VF queues aren't enabled */
 133	if (!vf->num_qs_ena)
 134		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
 135	else if (vf->link_forced)
 136		ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
 137	else
 138		ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
 139				 ICE_AQ_LINK_UP);
 140
 141	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
 142			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
 143			      sizeof(pfe), NULL);
 144}
 145
 146/**
 147 * ice_free_vf_res - Free a VF's resources
 148 * @vf: pointer to the VF info
 149 */
 150static void ice_free_vf_res(struct ice_vf *vf)
 151{
 152	struct ice_pf *pf = vf->pf;
 153	int i, last_vector_idx;
 154
 155	/* First, disable VF's configuration API to prevent OS from
 156	 * accessing the VF's VSI after it's freed or invalidated.
 157	 */
 158	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 159
 160	/* free VSI and disconnect it from the parent uplink */
 161	if (vf->lan_vsi_idx) {
 162		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
 163		vf->lan_vsi_idx = 0;
 164		vf->lan_vsi_num = 0;
 165		vf->num_mac = 0;
 166	}
 167
 168	last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
 169	/* Disable interrupts so that VF starts in a known state */
 170	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
 171		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
 172		ice_flush(&pf->hw);
 173	}
 174	/* reset some of the state variables keeping track of the resources */
 175	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 176	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 177}
 178
 179/**
 180 * ice_dis_vf_mappings
 181 * @vf: pointer to the VF structure
 182 */
 183static void ice_dis_vf_mappings(struct ice_vf *vf)
 184{
 185	struct ice_pf *pf = vf->pf;
 186	struct ice_vsi *vsi;
 187	int first, last, v;
 188	struct ice_hw *hw;
 189
 190	hw = &pf->hw;
 191	vsi = pf->vsi[vf->lan_vsi_idx];
 192
 193	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
 194	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
 195
 196	first = vf->first_vector_idx;
 197	last = first + pf->num_vf_msix - 1;
 198	for (v = first; v <= last; v++) {
 199		u32 reg;
 200
 201		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
 202			GLINT_VECT2FUNC_IS_PF_M) |
 203		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 204			GLINT_VECT2FUNC_PF_NUM_M));
 205		wr32(hw, GLINT_VECT2FUNC(v), reg);
 206	}
 207
 208	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
 209		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
 210	else
 211		dev_err(&pf->pdev->dev,
 212			"Scattered mode for VF Tx queues is not yet implemented\n");
 213
 214	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
 215		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
 216	else
 217		dev_err(&pf->pdev->dev,
 218			"Scattered mode for VF Rx queues is not yet implemented\n");
 219}
 220
 221/**
 222 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 223 * @pf: pointer to the PF structure
 224 *
 225 * If MSIX entries from the pf->irq_tracker were needed then we need to
 226 * reset the irq_tracker->end and give back the entries we needed to
 227 * num_avail_sw_msix.
 228 *
 229 * If no MSIX entries were taken from the pf->irq_tracker then just clear
 230 * the pf->sriov_base_vector.
 231 *
 232 * Returns 0 on success, and -EINVAL on error.
 233 */
 234static int ice_sriov_free_msix_res(struct ice_pf *pf)
 235{
 236	struct ice_res_tracker *res;
 237
 238	if (!pf)
 239		return -EINVAL;
 240
 241	res = pf->irq_tracker;
 242	if (!res)
 243		return -EINVAL;
 244
 245	/* give back irq_tracker resources used */
 246	if (pf->sriov_base_vector < res->num_entries) {
 247		res->end = res->num_entries;
 248		pf->num_avail_sw_msix +=
 249			res->num_entries - pf->sriov_base_vector;
 250	}
 251
 252	pf->sriov_base_vector = 0;
 253
 254	return 0;
 255}
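/* Worked example (illustrative, not in the original source): if the
 * irq_tracker has num_entries = 512 and pf->sriov_base_vector = 500, SR-IOV
 * had dipped into the tracker, so res->end is restored to 512 and
 * 512 - 500 = 12 vectors are handed back to num_avail_sw_msix.
 */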
 256
 257/**
 258 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 259 * @vf: pointer to the VF structure
 260 */
 261void ice_set_vf_state_qs_dis(struct ice_vf *vf)
 262{
 263	/* Clear Rx/Tx enabled queues flag */
 264	bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
 265	bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
 266	vf->num_qs_ena = 0;
 267	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
 268}
 269
 270/**
 271 * ice_dis_vf_qs - Disable the VF queues
 272 * @vf: pointer to the VF structure
 273 */
 274static void ice_dis_vf_qs(struct ice_vf *vf)
 275{
 276	struct ice_pf *pf = vf->pf;
 277	struct ice_vsi *vsi;
 278
 279	vsi = pf->vsi[vf->lan_vsi_idx];
 280
 281	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
 282	ice_vsi_stop_rx_rings(vsi);
 283	ice_set_vf_state_qs_dis(vf);
 284}
 285
 286/**
 287 * ice_free_vfs - Free all VFs
 288 * @pf: pointer to the PF structure
 289 */
 290void ice_free_vfs(struct ice_pf *pf)
 291{
 292	struct ice_hw *hw = &pf->hw;
 293	int tmp, i;
 294
 295	if (!pf->vf)
 296		return;
 297
 298	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
 299		usleep_range(1000, 2000);
 300
 301	/* Avoid wait time by stopping all VFs at the same time */
 302	for (i = 0; i < pf->num_alloc_vfs; i++)
 303		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
 304			ice_dis_vf_qs(&pf->vf[i]);
 305
 306	/* Disable IOV before freeing resources. This lets any VF drivers
 307	 * running in the host get themselves cleaned up before we yank
 308	 * the carpet out from underneath their feet.
 309	 */
 310	if (!pci_vfs_assigned(pf->pdev))
 311		pci_disable_sriov(pf->pdev);
 312	else
 313		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
 314
 315	tmp = pf->num_alloc_vfs;
 316	pf->num_vf_qps = 0;
 317	pf->num_alloc_vfs = 0;
 318	for (i = 0; i < tmp; i++) {
 319		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
 320			/* disable VF qp mappings */
 321			ice_dis_vf_mappings(&pf->vf[i]);
 322			ice_free_vf_res(&pf->vf[i]);
 323		}
 324	}
 325
 326	if (ice_sriov_free_msix_res(pf))
 327		dev_err(&pf->pdev->dev,
 328			"Failed to free MSIX resources used by SR-IOV\n");
 329
 330	devm_kfree(&pf->pdev->dev, pf->vf);
 331	pf->vf = NULL;
 332
 333	/* This check is for when the driver is unloaded while VFs are
 334	 * assigned. Setting the number of VFs to 0 through sysfs is caught
 335	 * before this function ever gets called.
 336	 */
 337	if (!pci_vfs_assigned(pf->pdev)) {
 338		int vf_id;
 339
 340		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
 341		 * work correctly when SR-IOV gets re-enabled.
 342		 */
 343		for (vf_id = 0; vf_id < tmp; vf_id++) {
 344			u32 reg_idx, bit_idx;
 345
 346			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 347			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 348			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 349		}
 350	}
 351	clear_bit(__ICE_VF_DIS, pf->state);
 352	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 353}
 354
 355/**
 356 * ice_trigger_vf_reset - Reset a VF on HW
 357 * @vf: pointer to the VF structure
 358 * @is_vflr: true if VFLR was issued, false if not
 359 * @is_pfr: true if the reset was triggered due to a previous PFR
 360 *
 361 * Trigger hardware to start a reset for a particular VF. Expects the caller
 362 * to wait the proper amount of time to allow hardware to reset the VF before
 363 * it cleans up and restores VF functionality.
 364 */
 365static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 366{
 367	struct ice_pf *pf = vf->pf;
 368	u32 reg, reg_idx, bit_idx;
 369	struct ice_hw *hw;
 370	int vf_abs_id, i;
 371
 372	hw = &pf->hw;
 373	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 374
 375	/* Inform VF that it is no longer active, as a warning */
 376	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 377
 378	/* Disable VF's configuration API during reset. The flag is re-enabled
 379	 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
  380	 * It's normally disabled in ice_free_vf_res(), but it's safer
  381	 * to do it earlier to give time for any VF config functions
  382	 * still running at this point to finish.
 383	 */
 384	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 385
 386	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
 387	 * in the case of VFR. If this is done for PFR, it can mess up VF
 388	 * resets because the VF driver may already have started cleanup
 389	 * by the time we get here.
 390	 */
 391	if (!is_pfr)
 392		wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
 393
 394	/* In the case of a VFLR, the HW has already reset the VF and we
 395	 * just need to clean up, so don't hit the VFRTRIG register.
 396	 */
 397	if (!is_vflr) {
 398		/* reset VF using VPGEN_VFRTRIG reg */
 399		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
 400		reg |= VPGEN_VFRTRIG_VFSWR_M;
 401		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
 402	}
 403	/* clear the VFLR bit in GLGEN_VFLRSTAT */
 404	reg_idx = (vf_abs_id) / 32;
 405	bit_idx = (vf_abs_id) % 32;
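	/* Illustrative example: with vf_base_id 0 and vf_abs_id 40, this
	 * resolves to bit 8 (40 % 32) of GLGEN_VFLRSTAT(1) (40 / 32).
	 */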
 406	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 407	ice_flush(hw);
 408
 409	wr32(hw, PF_PCI_CIAA,
 410	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
 411	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
 412		reg = rd32(hw, PF_PCI_CIAD);
 413		/* no transactions pending so stop polling */
 414		if ((reg & VF_TRANS_PENDING_M) == 0)
 415			break;
 416
 417		dev_err(&pf->pdev->dev,
 418			"VF %d PCI transactions stuck\n", vf->vf_id);
 419		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
 420	}
 421}
 422
 423/**
 424 * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
 425 * @ctxt: the VSI ctxt to fill
 426 * @vid: the VLAN ID to set as a PVID
 427 */
 428static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
 429{
 430	ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
 431				 ICE_AQ_VSI_PVLAN_INSERT_PVID |
 432				 ICE_AQ_VSI_VLAN_EMOD_STR);
 433	ctxt->info.pvid = cpu_to_le16(vid);
 434	ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 435	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
 436						ICE_AQ_VSI_PROP_SW_VALID);
 437}
 438
 439/**
 440 * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
 441 * @ctxt: the VSI ctxt to fill
 442 */
 443static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
 444{
 445	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
 446	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
 447	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 448	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
 449						ICE_AQ_VSI_PROP_SW_VALID);
 450}
 451
 452/**
 453 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 454 * @vsi: the VSI to update
 455 * @vid: the VLAN ID to set as a PVID
  456 * @enable: true to enable the PVID, false to disable it
 457 */
 458static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
 459{
 460	struct device *dev = &vsi->back->pdev->dev;
 461	struct ice_hw *hw = &vsi->back->hw;
 462	struct ice_vsi_ctx *ctxt;
 463	enum ice_status status;
 464	int ret = 0;
 465
 466	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
 467	if (!ctxt)
 468		return -ENOMEM;
 469
 470	ctxt->info = vsi->info;
 471	if (enable)
 472		ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
 473	else
 474		ice_vsi_kill_pvid_fill_ctxt(ctxt);
 475
 476	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
 477	if (status) {
 478		dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
 479			 status, hw->adminq.sq_last_status);
 480		ret = -EIO;
 481		goto out;
 482	}
 483
 484	vsi->info = ctxt->info;
 485out:
 486	devm_kfree(dev, ctxt);
 487	return ret;
 488}
 489
 490/**
 491 * ice_vf_vsi_setup - Set up a VF VSI
 492 * @pf: board private structure
 493 * @pi: pointer to the port_info instance
 494 * @vf_id: defines VF ID to which this VSI connects.
 495 *
 496 * Returns pointer to the successfully allocated VSI struct on success,
 497 * otherwise returns NULL on failure.
 498 */
 499static struct ice_vsi *
 500ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
 501{
 502	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
 503}
 504
 505/**
 506 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 507 * @pf: pointer to PF structure
 508 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 509 *
 510 * This returns the first MSIX vector index in PF space that is used by this VF.
 511 * This index is used when accessing PF relative registers such as
 512 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 513 * This will always be the OICR index in the AVF driver so any functionality
 514 * using vf->first_vector_idx for queue configuration will have to increment by
 515 * 1 to avoid meddling with the OICR index.
 516 */
 517static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
 518{
 519	return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
 520}
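/* Worked example (illustrative, not in the original source): with
 * pf->sriov_base_vector = 100 and pf->num_vf_msix = 5, VF 3 gets first
 * vector index 100 + 3 * 5 = 115, which is also its OICR index.
 */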
 521
 522/**
 523 * ice_alloc_vsi_res - Setup VF VSI and its resources
 524 * @vf: pointer to the VF structure
 525 *
 526 * Returns 0 on success, negative value on failure
 527 */
 528static int ice_alloc_vsi_res(struct ice_vf *vf)
 529{
 530	struct ice_pf *pf = vf->pf;
 531	LIST_HEAD(tmp_add_list);
 532	u8 broadcast[ETH_ALEN];
 533	struct ice_vsi *vsi;
 534	int status = 0;
 535
 536	/* first vector index is the VFs OICR index */
 537	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
 538
 539	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
 540	if (!vsi) {
 541		dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
 542		return -ENOMEM;
 543	}
 544
 545	vf->lan_vsi_idx = vsi->idx;
 546	vf->lan_vsi_num = vsi->vsi_num;
 547
  548	/* Check if a port VLAN existed before, and restore it accordingly */
 549	if (vf->port_vlan_id) {
 550		ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
 551		ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
 552	}
 553
 554	eth_broadcast_addr(broadcast);
 555
 556	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
 557	if (status)
 558		goto ice_alloc_vsi_res_exit;
 559
 560	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
 561		status = ice_add_mac_to_list(vsi, &tmp_add_list,
 562					     vf->dflt_lan_addr.addr);
 563		if (status)
 564			goto ice_alloc_vsi_res_exit;
 565	}
 566
 567	status = ice_add_mac(&pf->hw, &tmp_add_list);
 568	if (status)
 569		dev_err(&pf->pdev->dev,
  570			"could not add MAC filters, error %d\n", status);
 571	else
 572		vf->num_mac = 1;
 573
 574	/* Clear this bit after VF initialization since we shouldn't reclaim
 575	 * and reassign interrupts for synchronous or asynchronous VFR events.
 576	 * We don't want to reconfigure interrupts since AVF driver doesn't
 577	 * expect vector assignment to be changed unless there is a request for
 578	 * more vectors.
 579	 */
 580ice_alloc_vsi_res_exit:
 581	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
 582	return status;
 583}
 584
 585/**
 586 * ice_alloc_vf_res - Allocate VF resources
 587 * @vf: pointer to the VF structure
 588 */
 589static int ice_alloc_vf_res(struct ice_vf *vf)
 590{
 591	struct ice_pf *pf = vf->pf;
 592	int tx_rx_queue_left;
 593	int status;
 594
  595	/* Update number of VF queues, in case the VF requested queue
 596	 * changes
 597	 */
 598	tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
 599				 ice_get_avail_rxq_count(pf));
 600	tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
 601	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
 602	    vf->num_req_qs != vf->num_vf_qs)
 603		vf->num_vf_qs = vf->num_req_qs;
 604
 605	/* setup VF VSI and necessary resources */
 606	status = ice_alloc_vsi_res(vf);
 607	if (status)
 608		goto ice_alloc_vf_res_exit;
 609
 610	if (vf->trusted)
 611		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 612	else
 613		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 614
 615	/* VF is now completely initialized */
 616	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
 617
 618	return status;
 619
 620ice_alloc_vf_res_exit:
 621	ice_free_vf_res(vf);
 622	return status;
 623}
 624
 625/**
 626 * ice_ena_vf_mappings
 627 * @vf: pointer to the VF structure
 628 *
 629 * Enable VF vectors and queues allocation by writing the details into
 630 * respective registers.
 631 */
 632static void ice_ena_vf_mappings(struct ice_vf *vf)
 633{
 634	int abs_vf_id, abs_first, abs_last;
 635	struct ice_pf *pf = vf->pf;
 636	struct ice_vsi *vsi;
 637	int first, last, v;
 638	struct ice_hw *hw;
 639	u32 reg;
 640
 641	hw = &pf->hw;
 642	vsi = pf->vsi[vf->lan_vsi_idx];
 643	first = vf->first_vector_idx;
 644	last = (first + pf->num_vf_msix) - 1;
 645	abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
 646	abs_last = (abs_first + pf->num_vf_msix) - 1;
 647	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 648
 649	/* VF Vector allocation */
 650	reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
 651	       ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
 652	       VPINT_ALLOC_VALID_M);
 653	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
 654
 655	reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
 656		 & VPINT_ALLOC_PCI_FIRST_M) |
 657	       ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
 658	       VPINT_ALLOC_PCI_VALID_M);
 659	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
 660	/* map the interrupts to its functions */
 661	for (v = first; v <= last; v++) {
 662		reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
 663			GLINT_VECT2FUNC_VF_NUM_M) |
 664		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 665			GLINT_VECT2FUNC_PF_NUM_M));
 666		wr32(hw, GLINT_VECT2FUNC(v), reg);
 667	}
 668
 669	/* Map mailbox interrupt. We put an explicit 0 here to remind us that
 670	 * VF admin queue interrupts will go to VF MSI-X vector 0.
 671	 */
 672	wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
 673	/* set regardless of mapping mode */
 674	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
 675
 676	/* VF Tx queues allocation */
 677	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
 678		/* set the VF PF Tx queue range
 679		 * VFNUMQ value should be set to (number of queues - 1). A value
 680		 * of 0 means 1 queue and a value of 255 means 256 queues
 681		 */
 682		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
 683			VPLAN_TX_QBASE_VFFIRSTQ_M) |
 684		       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
 685			VPLAN_TX_QBASE_VFNUMQ_M));
 686		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
 687	} else {
 688		dev_err(&pf->pdev->dev,
 689			"Scattered mode for VF Tx queues is not yet implemented\n");
 690	}
 691
 692	/* set regardless of mapping mode */
 693	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
 694
 695	/* VF Rx queues allocation */
 696	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
 697		/* set the VF PF Rx queue range
 698		 * VFNUMQ value should be set to (number of queues - 1). A value
 699		 * of 0 means 1 queue and a value of 255 means 256 queues
 700		 */
 701		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
 702			VPLAN_RX_QBASE_VFFIRSTQ_M) |
  703		       (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
 704			VPLAN_RX_QBASE_VFNUMQ_M));
 705		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
 706	} else {
 707		dev_err(&pf->pdev->dev,
 708			"Scattered mode for VF Rx queues is not yet implemented\n");
 709	}
 710}
 711
 712/**
 713 * ice_determine_res
 714 * @pf: pointer to the PF structure
 715 * @avail_res: available resources in the PF structure
 716 * @max_res: maximum resources that can be given per VF
 717 * @min_res: minimum resources that can be given per VF
 718 *
  719 * Returns a non-zero value if resources (queues/vectors) are available,
  720 * or zero if the PF cannot accommodate all num_alloc_vfs.
 721 */
 722static int
 723ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
 724{
 725	bool checked_min_res = false;
 726	int res;
 727
  728	/* Start by checking if the PF can assign the max number of resources
  729	 * for all num_alloc_vfs.
  730	 * If yes, return that number per VF.
  731	 * If no, divide by 2, round up, and check again.
  732	 * Repeat the loop until we reach a point where even the minimum
  733	 * resources are not available; in that case return 0.
  734	 */
 735	res = max_res;
 736	while ((res >= min_res) && !checked_min_res) {
 737		int num_all_res;
 738
 739		num_all_res = pf->num_alloc_vfs * res;
 740		if (num_all_res <= avail_res)
 741			return res;
 742
 743		if (res == min_res)
 744			checked_min_res = true;
 745
 746		res = DIV_ROUND_UP(res, 2);
 747	}
 748	return 0;
 749}
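/* Worked example (illustrative, not in the original source): with
 * num_alloc_vfs = 10, max_res = 16, min_res = 1 and avail_res = 50:
 * 10 * 16 = 160 > 50, so retry with DIV_ROUND_UP(16, 2) = 8;
 * 10 * 8 = 80 > 50, so retry with 4; 10 * 4 = 40 <= 50, so each VF is
 * granted 4 of the resource.
 */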
 750
 751/**
 752 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 753 * @vf: VF to calculate the register index for
 754 * @q_vector: a q_vector associated to the VF
 755 */
 756int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
 757{
 758	struct ice_pf *pf;
 759
 760	if (!vf || !q_vector)
 761		return -EINVAL;
 762
 763	pf = vf->pf;
 764
 765	/* always add one to account for the OICR being the first MSIX */
 766	return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
 767		q_vector->v_idx + 1;
 768}
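/* Worked example (illustrative, not in the original source): continuing the
 * numbers above (sriov_base_vector = 100, num_vf_msix = 5), q_vector 0 of
 * VF 3 maps to register index 100 + 5 * 3 + 0 + 1 = 116, one past the VF's
 * OICR vector.
 */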
 769
 770/**
 771 * ice_get_max_valid_res_idx - Get the max valid resource index
 772 * @res: pointer to the resource to find the max valid index for
 773 *
 774 * Start from the end of the ice_res_tracker and return right when we find the
 775 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 776 * valid for SR-IOV because it is the only consumer that manipulates the
 777 * res->end and this is always called when res->end is set to res->num_entries.
 778 */
 779static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
 780{
 781	int i;
 782
 783	if (!res)
 784		return -EINVAL;
 785
 786	for (i = res->num_entries - 1; i >= 0; i--)
 787		if (res->list[i] & ICE_RES_VALID_BIT)
 788			return i;
 789
 790	return 0;
 791}
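/* Illustrative example: for a tracker with num_entries = 512 where only
 * entries 0..99 have ICE_RES_VALID_BIT set, the walk starts at index 511
 * and returns 99.
 */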
 792
 793/**
 794 * ice_sriov_set_msix_res - Set any used MSIX resources
 795 * @pf: pointer to PF structure
 796 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 797 *
 798 * This function allows SR-IOV resources to be taken from the end of the PF's
 799 * allowed HW MSIX vectors so in many cases the irq_tracker will not
 800 * be needed. In these cases we just set the pf->sriov_base_vector and return
 801 * success.
 802 *
 803 * If SR-IOV needs to use any pf->irq_tracker entries it updates the
 804 * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
 805 * so any calls to ice_get_res() using the irq_tracker will not try to use
 806 * resources at or beyond the newly set value.
 807 *
  808 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
  809 * in the PF's space available for SR-IOV.
 810 */
 811static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
 812{
 813	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
 814	u16 pf_total_msix_vectors =
 815		pf->hw.func_caps.common_cap.num_msix_vectors;
 816	struct ice_res_tracker *res = pf->irq_tracker;
 817	int sriov_base_vector;
 818
 819	if (max_valid_res_idx < 0)
 820		return max_valid_res_idx;
 821
 822	sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
 823
 824	/* make sure we only grab irq_tracker entries from the list end and
 825	 * that we have enough available MSIX vectors
 826	 */
 827	if (sriov_base_vector <= max_valid_res_idx)
 828		return -EINVAL;
 829
 830	pf->sriov_base_vector = sriov_base_vector;
 831
 832	/* dip into irq_tracker entries and update used resources */
 833	if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
 834		pf->num_avail_sw_msix -=
 835			res->num_entries - pf->sriov_base_vector;
 836		res->end = pf->sriov_base_vector;
 837	}
 838
 839	return 0;
 840}
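/* Worked example (illustrative, not in the original source): with 1024
 * total MSIX vectors, an irq_tracker of num_entries = 512 and
 * num_msix_needed = 80, sriov_base_vector becomes 944 and the irq_tracker
 * is untouched since 80 <= 1024 - 512. Had 600 vectors been needed,
 * res->end would have been pulled down to 1024 - 600 = 424.
 */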
 841
 842/**
 843 * ice_check_avail_res - check if vectors and queues are available
 844 * @pf: pointer to the PF structure
 845 *
  846 * This function is where we calculate the actual number of resources for
  847 * VF VSIs; we don't reserve ahead of time during probe. Returns 0 if vector
  848 * and queue resources are available, otherwise returns an error code.
 849 */
 850static int ice_check_avail_res(struct ice_pf *pf)
 851{
 852	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
 853	u16 num_msix, num_txq, num_rxq, num_avail_msix;
 854
 855	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
 856		return -EINVAL;
 857
 858	/* add 1 to max_valid_res_idx to account for it being 0-based */
 859	num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
 860		(max_valid_res_idx + 1);
 861
 862	/* Grab from HW interrupts common pool
 863	 * Note: By the time the user decides it needs more vectors in a VF
  864	 * it's already too late since one must decide this prior to creating the
 865	 * VF interface. So the best we can do is take a guess as to what the
 866	 * user might want.
 867	 *
 868	 * We have two policies for vector allocation:
 869	 * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
 870	 * number of NFV VFs used for NFV appliances, since this is a special
 871	 * case, we try to assign maximum vectors per VF (65) as much as
 872	 * possible, based on determine_resources algorithm.
  873	 * 2. if num_alloc_vfs is from 17 to 256, then it's a large number of
 874	 * regular VFs which are not used for any special purpose. Hence try to
 875	 * grab default interrupt vectors (5 as supported by AVF driver).
 876	 */
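	/* Illustrative example: for 8 VFs the first policy applies, so we
	 * try ICE_MAX_INTR_PER_VF (65) per VF and, if 8 * 65 vectors are not
	 * available, fall back through 33, 17, 9, 5, 3, 2, ... until the
	 * product fits or even ICE_MIN_INTR_PER_VF cannot be met.
	 */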
 877	if (pf->num_alloc_vfs <= 16) {
 878		num_msix = ice_determine_res(pf, num_avail_msix,
 879					     ICE_MAX_INTR_PER_VF,
 880					     ICE_MIN_INTR_PER_VF);
 881	} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
 882		num_msix = ice_determine_res(pf, num_avail_msix,
 883					     ICE_DFLT_INTR_PER_VF,
 884					     ICE_MIN_INTR_PER_VF);
 885	} else {
 886		dev_err(&pf->pdev->dev,
 887			"Number of VFs %d exceeds max VF count %d\n",
 888			pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
 889		return -EIO;
 890	}
 891
 892	if (!num_msix)
 893		return -EIO;
 894
  895	/* Grab from the common pool
  896	 * Start by requesting the default number of queues (4, as supported by
  897	 * the AVF driver). Note that the main difference between queues and
  898	 * vectors is that the latter can only be reserved at init time, while
  899	 * queues can be requested by a VF at runtime through virtchnl; that is
  900	 * why we start by reserving only a few queues.
  901	 */
 902	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
 903				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
 904
 905	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
 906				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
 907
 908	if (!num_txq || !num_rxq)
 909		return -EIO;
 910
 911	if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
 912		return -EINVAL;
 913
  914	/* The AVF driver works only with queue pairs, which means it expects
  915	 * an equal number of Rx and Tx queues, so take the minimum of the
  916	 * available Tx and Rx queues
 917	 */
 918	pf->num_vf_qps = min_t(int, num_txq, num_rxq);
 919	pf->num_vf_msix = num_msix;
 920
 921	return 0;
 922}
 923
 924/**
 925 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 926 * @vf: pointer to the VF structure
 927 *
  928 * Clean up a VF after the hardware reset is finished. Expects the caller to
  929 * have verified that the reset finished properly, and to have ensured the
  930 * minimum amount of wait time has passed. Reallocates VF resources to make
  931 * the VF state active.
 932 */
 933static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
 934{
 935	struct ice_pf *pf = vf->pf;
 936	struct ice_hw *hw;
 937	u32 reg;
 938
 939	hw = &pf->hw;
 940
 941	/* PF software completes the flow by notifying VF that reset flow is
 942	 * completed. This is done by enabling hardware by clearing the reset
 943	 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
 944	 * register to VFR completed (done at the end of this function)
 945	 * By doing this we allow HW to access VF memory at any point. If we
 946	 * did it any sooner, HW could access memory while it was being freed
 947	 * in ice_free_vf_res(), causing an IOMMU fault.
 948	 *
 949	 * On the other hand, this needs to be done ASAP, because the VF driver
 950	 * is waiting for this to happen and may report a timeout. It's
 951	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
 952	 * it.
 953	 */
 954	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
 955	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
 956	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
 957
 958	/* reallocate VF resources to finish resetting the VSI state */
 959	if (!ice_alloc_vf_res(vf)) {
 960		ice_ena_vf_mappings(vf);
 961		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 962		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
 963		vf->num_vlan = 0;
 964	}
 965
 966	/* Tell the VF driver the reset is done. This needs to be done only
 967	 * after VF has been fully initialized, because the VF driver may
 968	 * request resources immediately after setting this flag.
 969	 */
 970	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
 971}
 972
 973/**
 974 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 975 * @vf: pointer to the VF info
 976 * @vsi: the VSI being configured
 977 * @promisc_m: mask of promiscuous config bits
 978 * @rm_promisc: promisc flag request from the VF to remove or add filter
 979 *
 980 * This function configures VF VSI promiscuous mode, based on the VF requests,
 981 * for Unicast, Multicast and VLAN
 982 */
 983static enum ice_status
 984ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
 985		       bool rm_promisc)
 986{
 987	struct ice_pf *pf = vf->pf;
 988	enum ice_status status = 0;
 989	struct ice_hw *hw;
 990
 991	hw = &pf->hw;
 992	if (vf->num_vlan) {
 993		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
 994						  rm_promisc);
 995	} else if (vf->port_vlan_id) {
 996		if (rm_promisc)
 997			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 998						       vf->port_vlan_id);
 999		else
1000			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1001						     vf->port_vlan_id);
1002	} else {
1003		if (rm_promisc)
1004			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1005						       0);
1006		else
1007			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1008						     0);
1009	}
1010
1011	return status;
1012}
1013
1014/**
1015 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
1016 * @pf: pointer to the PF structure
1017 *
 1018 * This function is called as the last part of resetting all VFs, or when
 1019 * configuring VFs for the first time, where there are no resources to free.
1020 * Returns true if resources were properly allocated for all VFs, and false
1021 * otherwise.
1022 */
1023static bool ice_config_res_vfs(struct ice_pf *pf)
1024{
1025	struct ice_hw *hw = &pf->hw;
1026	int v;
1027
1028	if (ice_check_avail_res(pf)) {
1029		dev_err(&pf->pdev->dev,
1030			"Cannot allocate VF resources, try with fewer number of VFs\n");
1031		return false;
1032	}
1033
1034	/* rearm global interrupts */
1035	if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
1036		ice_irq_dynamic_ena(hw, NULL, NULL);
1037
1038	/* Finish resetting each VF and allocate resources */
1039	for (v = 0; v < pf->num_alloc_vfs; v++) {
1040		struct ice_vf *vf = &pf->vf[v];
1041
1042		vf->num_vf_qs = pf->num_vf_qps;
1043		dev_dbg(&pf->pdev->dev,
1044			"VF-id %d has %d queues configured\n",
1045			vf->vf_id, vf->num_vf_qs);
1046		ice_cleanup_and_realloc_vf(vf);
1047	}
1048
1049	ice_flush(hw);
1050	clear_bit(__ICE_VF_DIS, pf->state);
1051
1052	return true;
1053}
1054
1055/**
1056 * ice_reset_all_vfs - reset all allocated VFs in one go
1057 * @pf: pointer to the PF structure
1058 * @is_vflr: true if VFLR was issued, false if not
1059 *
1060 * First, tell the hardware to reset each VF, then do all the waiting in one
1061 * chunk, and finally finish restoring each VF after the wait. This is useful
 1062 * during PF routines which need to reset all VFs, as otherwise they would
 1063 * have to perform these resets in a serialized fashion.
1064 *
1065 * Returns true if any VFs were reset, and false otherwise.
1066 */
1067bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1068{
1069	struct ice_hw *hw = &pf->hw;
1070	struct ice_vf *vf;
1071	int v, i;
1072
1073	/* If we don't have any VFs, then there is nothing to reset */
1074	if (!pf->num_alloc_vfs)
1075		return false;
1076
1077	/* If VFs have been disabled, there is no need to reset */
1078	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1079		return false;
1080
1081	/* Begin reset on all VFs at once */
1082	for (v = 0; v < pf->num_alloc_vfs; v++)
1083		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1084
1085	for (v = 0; v < pf->num_alloc_vfs; v++) {
1086		struct ice_vsi *vsi;
1087
1088		vf = &pf->vf[v];
1089		vsi = pf->vsi[vf->lan_vsi_idx];
1090		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1091			ice_dis_vf_qs(vf);
1092		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1093				NULL, ICE_VF_RESET, vf->vf_id, NULL);
1094	}
1095
1096	/* HW requires some time to make sure it can flush the FIFO for a VF
1097	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1098	 * sequence to make sure that it has completed. We'll keep track of
1099	 * the VFs using a simple iterator that increments once that VF has
1100	 * finished resetting.
1101	 */
1102	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1103
1104		/* Check each VF in sequence */
1105		while (v < pf->num_alloc_vfs) {
1106			u32 reg;
1107
1108			vf = &pf->vf[v];
1109			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1110			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1111				/* only delay if the check failed */
1112				usleep_range(10, 20);
1113				break;
1114			}
1115
1116			/* If the current VF has finished resetting, move on
1117			 * to the next VF in sequence.
1118			 */
1119			v++;
1120		}
1121	}
1122
1123	/* Display a warning if at least one VF didn't manage to reset in
1124	 * time, but continue on with the operation.
1125	 */
1126	if (v < pf->num_alloc_vfs)
1127		dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
1128
1129	/* free VF resources to begin resetting the VSI state */
1130	for (v = 0; v < pf->num_alloc_vfs; v++) {
1131		vf = &pf->vf[v];
1132
1133		ice_free_vf_res(vf);
1134
1135		/* Free VF queues as well, and reallocate later.
1136		 * If a given VF has different number of queues
1137		 * configured, the request for update will come
1138		 * via mailbox communication.
1139		 */
1140		vf->num_vf_qs = 0;
1141	}
1142
1143	if (ice_sriov_free_msix_res(pf))
1144		dev_err(&pf->pdev->dev,
1145			"Failed to free MSIX resources used by SR-IOV\n");
1146
1147	if (!ice_config_res_vfs(pf))
1148		return false;
1149
1150	return true;
1151}
1152
1153/**
1154 * ice_reset_vf - Reset a particular VF
1155 * @vf: pointer to the VF structure
1156 * @is_vflr: true if VFLR was issued, false if not
1157 *
1158 * Returns true if the VF is reset, false otherwise.
1159 */
1160static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1161{
1162	struct ice_pf *pf = vf->pf;
1163	struct ice_vsi *vsi;
1164	struct ice_hw *hw;
1165	bool rsd = false;
1166	u8 promisc_m;
1167	u32 reg;
1168	int i;
1169
 1170	/* If the PF has been disabled, there is no need to reset the VF
 1171	 * until the PF is active again.
1172	 */
1173	if (test_bit(__ICE_VF_DIS, pf->state))
1174		return false;
1175
1176	/* If the VF has been disabled, this means something else is
1177	 * resetting the VF, so we shouldn't continue. Otherwise, set
1178	 * disable VF state bit for actual reset, and continue.
1179	 */
1180	if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
1181		return false;
1182
1183	ice_trigger_vf_reset(vf, is_vflr, false);
1184
1185	vsi = pf->vsi[vf->lan_vsi_idx];
1186
1187	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1188		ice_dis_vf_qs(vf);
1189
1190	/* Call Disable LAN Tx queue AQ whether or not queues are
1191	 * enabled. This is needed for successful completion of VFR.
1192	 */
1193	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1194			NULL, ICE_VF_RESET, vf->vf_id, NULL);
1195
1196	hw = &pf->hw;
1197	/* poll VPGEN_VFRSTAT reg to make sure
1198	 * that reset is complete
1199	 */
1200	for (i = 0; i < 10; i++) {
1201		/* VF reset requires driver to first reset the VF and then
1202		 * poll the status register to make sure that the reset
1203		 * completed successfully.
1204		 */
1205		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1206		if (reg & VPGEN_VFRSTAT_VFRD_M) {
1207			rsd = true;
1208			break;
1209		}
1210
1211		/* only sleep if the reset is not done */
1212		usleep_range(10, 20);
1213	}
1214
 1215	/* Display a warning if the VF didn't manage to reset in time, but we
 1216	 * need to continue on with the operation.
1217	 */
1218	if (!rsd)
1219		dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1220			 vf->vf_id);
1221
1222	/* disable promiscuous modes in case they were enabled
1223	 * ignore any error if disabling process failed
1224	 */
1225	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1226	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
 1227		if (vf->port_vlan_id || vf->num_vlan)
1228			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1229		else
1230			promisc_m = ICE_UCAST_PROMISC_BITS;
1231
1232		vsi = pf->vsi[vf->lan_vsi_idx];
1233		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1234			dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
1235	}
1236
1237	/* free VF resources to begin resetting the VSI state */
1238	ice_free_vf_res(vf);
1239
1240	ice_cleanup_and_realloc_vf(vf);
1241
1242	ice_flush(hw);
1243
1244	return true;
1245}
1246
1247/**
1248 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1249 * @pf: pointer to the PF structure
1250 */
1251void ice_vc_notify_link_state(struct ice_pf *pf)
1252{
1253	int i;
1254
1255	for (i = 0; i < pf->num_alloc_vfs; i++)
1256		ice_vc_notify_vf_link_state(&pf->vf[i]);
1257}
1258
1259/**
1260 * ice_vc_notify_reset - Send pending reset message to all VFs
1261 * @pf: pointer to the PF structure
1262 *
1263 * indicate a pending reset to all VFs on a given PF
1264 */
1265void ice_vc_notify_reset(struct ice_pf *pf)
1266{
1267	struct virtchnl_pf_event pfe;
1268
1269	if (!pf->num_alloc_vfs)
1270		return;
1271
1272	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1273	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1274	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1275			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1276}
1277
1278/**
1279 * ice_vc_notify_vf_reset - Notify VF of a reset event
1280 * @vf: pointer to the VF structure
1281 */
1282static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1283{
1284	struct virtchnl_pf_event pfe;
1285
1286	/* validate the request */
1287	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1288		return;
1289
1290	/* verify if the VF is in either init or active before proceeding */
1291	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1292	    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1293		return;
1294
1295	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1296	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1297	ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1298			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1299			      NULL);
1300}
1301
1302/**
1303 * ice_alloc_vfs - Allocate and set up VFs resources
1304 * @pf: pointer to the PF structure
1305 * @num_alloc_vfs: number of VFs to allocate
1306 */
1307static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
1308{
1309	struct ice_hw *hw = &pf->hw;
1310	struct ice_vf *vfs;
1311	int i, ret;
1312
1313	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
1314	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1315	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1316	set_bit(__ICE_OICR_INTR_DIS, pf->state);
1317	ice_flush(hw);
1318
1319	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1320	if (ret) {
1321		pf->num_alloc_vfs = 0;
1322		goto err_unroll_intr;
1323	}
1324	/* allocate memory */
1325	vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
1326			   GFP_KERNEL);
1327	if (!vfs) {
1328		ret = -ENOMEM;
1329		goto err_pci_disable_sriov;
1330	}
1331	pf->vf = vfs;
1332
1333	/* apply default profile */
1334	for (i = 0; i < num_alloc_vfs; i++) {
1335		vfs[i].pf = pf;
1336		vfs[i].vf_sw_id = pf->first_sw;
1337		vfs[i].vf_id = i;
1338
1339		/* assign default capabilities */
1340		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1341		vfs[i].spoofchk = true;
1342	}
1343	pf->num_alloc_vfs = num_alloc_vfs;
1344
1345	/* VF resources get allocated with initialization */
1346	if (!ice_config_res_vfs(pf)) {
1347		ret = -EIO;
1348		goto err_unroll_sriov;
1349	}
1350
1351	return ret;
1352
1353err_unroll_sriov:
1354	pf->vf = NULL;
1355	devm_kfree(&pf->pdev->dev, vfs);
1356	vfs = NULL;
1357	pf->num_alloc_vfs = 0;
1358err_pci_disable_sriov:
1359	pci_disable_sriov(pf->pdev);
1360err_unroll_intr:
1361	/* rearm interrupts here */
1362	ice_irq_dynamic_ena(hw, NULL, NULL);
1363	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1364	return ret;
1365}
1366
1367/**
1368 * ice_pf_state_is_nominal - checks the PF for nominal state
1369 * @pf: pointer to PF to check
1370 *
1371 * Check the PF's state for a collection of bits that would indicate
1372 * the PF is in a state that would inhibit normal operation for
1373 * driver functionality.
1374 *
1375 * Returns true if PF is in a nominal state.
1376 * Returns false otherwise
1377 */
1378static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1379{
1380	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1381
1382	if (!pf)
1383		return false;
1384
1385	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1386	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1387		return false;
1388
1389	return true;
1390}
1391
1392/**
1393 * ice_pci_sriov_ena - Enable or change number of VFs
1394 * @pf: pointer to the PF structure
1395 * @num_vfs: number of VFs to allocate
1396 */
1397static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1398{
1399	int pre_existing_vfs = pci_num_vf(pf->pdev);
1400	struct device *dev = &pf->pdev->dev;
1401	int err;
1402
1403	if (!ice_pf_state_is_nominal(pf)) {
1404		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1405		return -EBUSY;
1406	}
1407
1408	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1409		dev_err(dev, "This device is not capable of SR-IOV\n");
1410		return -ENODEV;
1411	}
1412
1413	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1414		ice_free_vfs(pf);
1415	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1416		return num_vfs;
1417
1418	if (num_vfs > pf->num_vfs_supported) {
1419		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1420			num_vfs, pf->num_vfs_supported);
1421		return -ENOTSUPP;
1422	}
1423
1424	dev_info(dev, "Allocating %d VFs\n", num_vfs);
1425	err = ice_alloc_vfs(pf, num_vfs);
1426	if (err) {
1427		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1428		return err;
1429	}
1430
1431	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1432	return num_vfs;
1433}
1434
1435/**
1436 * ice_sriov_configure - Enable or change number of VFs via sysfs
1437 * @pdev: pointer to a pci_dev structure
1438 * @num_vfs: number of VFs to allocate
1439 *
1440 * This function is called when the user updates the number of VFs in sysfs.
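 *
 * For example (illustrative device path, not from the original source),
 * assuming the PF sits at 0000:af:00.0:
 *   # echo 8 > /sys/bus/pci/devices/0000:af:00.0/sriov_numvfs
 *   # echo 0 > /sys/bus/pci/devices/0000:af:00.0/sriov_numvfs
 * The first write allocates eight VFs; writing zero frees them again.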
1441 */
1442int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1443{
1444	struct ice_pf *pf = pci_get_drvdata(pdev);
1445
1446	if (ice_is_safe_mode(pf)) {
1447		dev_err(&pf->pdev->dev,
1448			"SR-IOV cannot be configured - Device is in Safe Mode\n");
1449		return -EOPNOTSUPP;
1450	}
1451
1452	if (num_vfs)
1453		return ice_pci_sriov_ena(pf, num_vfs);
1454
1455	if (!pci_vfs_assigned(pdev)) {
1456		ice_free_vfs(pf);
1457	} else {
1458		dev_err(&pf->pdev->dev,
1459			"can't free VFs because some are assigned to VMs.\n");
1460		return -EBUSY;
1461	}
1462
1463	return 0;
1464}
1465
1466/**
1467 * ice_process_vflr_event - Free VF resources via IRQ calls
1468 * @pf: pointer to the PF structure
1469 *
1470 * called from the VFLR IRQ handler to
1471 * free up VF resources and state variables
1472 */
1473void ice_process_vflr_event(struct ice_pf *pf)
1474{
1475	struct ice_hw *hw = &pf->hw;
1476	int vf_id;
1477	u32 reg;
1478
1479	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1480	    !pf->num_alloc_vfs)
1481		return;
1482
1483	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
1484		struct ice_vf *vf = &pf->vf[vf_id];
1485		u32 reg_idx, bit_idx;
1486
1487		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1488		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 1489		/* read GLGEN_VFLRSTAT register to find out which VFs were reset via VFLR */
1490		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1491		if (reg & BIT(bit_idx))
1492			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1493			ice_reset_vf(vf, true);
1494	}
1495}
1496
1497/**
1498 * ice_vc_dis_vf - Disable a given VF via SW reset
1499 * @vf: pointer to the VF info
1500 *
1501 * Disable the VF through a SW reset
1502 */
1503static void ice_vc_dis_vf(struct ice_vf *vf)
1504{
1505	ice_vc_notify_vf_reset(vf);
1506	ice_reset_vf(vf, false);
1507}
1508
1509/**
1510 * ice_vc_send_msg_to_vf - Send message to VF
1511 * @vf: pointer to the VF info
1512 * @v_opcode: virtual channel opcode
1513 * @v_retval: virtual channel return value
1514 * @msg: pointer to the msg buffer
1515 * @msglen: msg length
1516 *
1517 * send msg to VF
1518 */
1519static int
1520ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1521		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1522{
1523	enum ice_status aq_ret;
1524	struct ice_pf *pf;
1525
1526	/* validate the request */
1527	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1528		return -EINVAL;
1529
1530	pf = vf->pf;
1531
1532	/* single place to detect unsuccessful return values */
1533	if (v_retval) {
1534		vf->num_inval_msgs++;
1535		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1536			 vf->vf_id, v_opcode, v_retval);
1537		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1538			dev_err(&pf->pdev->dev,
1539				"Number of invalid messages exceeded for VF %d\n",
1540				vf->vf_id);
1541			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1542			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1543			return -EIO;
1544		}
1545	} else {
1546		vf->num_valid_msgs++;
1547		/* reset the invalid counter, if a valid message is received. */
1548		vf->num_inval_msgs = 0;
1549	}
1550
1551	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1552				       msg, msglen, NULL);
1553	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1554		dev_info(&pf->pdev->dev,
1555			 "Unable to send the message to VF %d ret %d aq_err %d\n",
1556			 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
1557		return -EIO;
1558	}
1559
1560	return 0;
1561}
1562
1563/**
1564 * ice_vc_get_ver_msg
1565 * @vf: pointer to the VF info
1566 * @msg: pointer to the msg buffer
1567 *
1568 * called from the VF to request the API version used by the PF
1569 */
1570static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1571{
1572	struct virtchnl_version_info info = {
1573		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1574	};
1575
1576	vf->vf_ver = *(struct virtchnl_version_info *)msg;
1577	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1578	if (VF_IS_V10(&vf->vf_ver))
1579		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1580
1581	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1582				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1583				     sizeof(struct virtchnl_version_info));
1584}
1585
1586/**
1587 * ice_vc_get_vf_res_msg
1588 * @vf: pointer to the VF info
1589 * @msg: pointer to the msg buffer
1590 *
1591 * called from the VF to request its resources
1592 */
1593static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1594{
1595	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1596	struct virtchnl_vf_resource *vfres = NULL;
1597	struct ice_pf *pf = vf->pf;
1598	struct ice_vsi *vsi;
1599	int len = 0;
1600	int ret;
1601
1602	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1603		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1604		goto err;
1605	}
1606
1607	len = sizeof(struct virtchnl_vf_resource);
1608
1609	vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
1610	if (!vfres) {
1611		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1612		len = 0;
1613		goto err;
1614	}
1615	if (VF_IS_V11(&vf->vf_ver))
1616		vf->driver_caps = *(u32 *)msg;
1617	else
1618		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1619				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1620				  VIRTCHNL_VF_OFFLOAD_VLAN;
1621
1622	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1623	vsi = pf->vsi[vf->lan_vsi_idx];
1624	if (!vsi) {
1625		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1626		goto err;
1627	}
1628
1629	if (!vsi->info.pvid)
1630		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1631
1632	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1633		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1634	} else {
1635		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1636			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1637		else
1638			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1639	}
1640
1641	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1642		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1643
1644	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1645		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1646
1647	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1648		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1649
1650	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1651		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1652
1653	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1654		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1655
1656	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1657		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1658
1659	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1660		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1661
1662	vfres->num_vsis = 1;
1663	/* Tx and Rx queue are equal for VF */
1664	vfres->num_queue_pairs = vsi->num_txq;
1665	vfres->max_vectors = pf->num_vf_msix;
1666	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1667	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1668
1669	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1670	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1671	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1672	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1673			vf->dflt_lan_addr.addr);
1674
1675	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1676
1677err:
1678	/* send the response back to the VF */
1679	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1680				    (u8 *)vfres, len);
1681
1682	devm_kfree(&pf->pdev->dev, vfres);
1683	return ret;
1684}
1685
1686/**
1687 * ice_vc_reset_vf_msg
1688 * @vf: pointer to the VF info
1689 *
1690 * called from the VF to reset itself,
1691 * unlike other virtchnl messages, PF driver
1692 * doesn't send the response back to the VF
1693 */
1694static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1695{
1696	if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1697		ice_reset_vf(vf, false);
1698}
1699
1700/**
1701 * ice_find_vsi_from_id
1702 * @pf: the PF structure to search for the VSI
1703 * @id: ID of the VSI to search for
1704 *
1705 * searches for the VSI with the given ID
1706 */
1707static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
1708{
1709	int i;
1710
1711	ice_for_each_vsi(pf, i)
1712		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1713			return pf->vsi[i];
1714
1715	return NULL;
1716}
1717
1718/**
1719 * ice_vc_isvalid_vsi_id
1720 * @vf: pointer to the VF info
1721 * @vsi_id: VF relative VSI ID
1722 *
1723 * check that the VSI ID is valid and owned by this VF
1724 */
1725static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1726{
1727	struct ice_pf *pf = vf->pf;
1728	struct ice_vsi *vsi;
1729
1730	vsi = ice_find_vsi_from_id(pf, vsi_id);
1731
1732	return (vsi && (vsi->vf_id == vf->vf_id));
1733}
1734
1735/**
1736 * ice_vc_isvalid_q_id
1737 * @vf: pointer to the VF info
1738 * @vsi_id: VSI ID
1739 * @qid: VSI relative queue ID
1740 *
1741 * check that the queue ID is valid for the given VSI
1742 */
1743static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1744{
1745	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1746	/* allocated Tx and Rx queues should always be equal for a VF VSI */
1747	return (vsi && (qid < vsi->alloc_txq));
1748}
1749
1750/**
1751 * ice_vc_isvalid_ring_len
1752 * @ring_len: length of ring
1753 *
1754 * check that the ring count is valid: it must be zero, or a multiple of
1755 * ICE_REQ_DESC_MULTIPLE within [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC]
1756 */
1757static bool ice_vc_isvalid_ring_len(u16 ring_len)
1758{
1759	return ring_len == 0 ||
1760	       (ring_len >= ICE_MIN_NUM_DESC &&
1761		ring_len <= ICE_MAX_NUM_DESC &&
1762		!(ring_len % ICE_REQ_DESC_MULTIPLE));
1763}
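/* For illustration, assuming the usual driver values ICE_MIN_NUM_DESC = 64,
 * ICE_MAX_NUM_DESC = 8160 and ICE_REQ_DESC_MULTIPLE = 32: ring_len values of
 * 0, 64, 512 and 8160 pass this check, while 100 (not a multiple of 32),
 * 32 (below the minimum) and 16384 (above the maximum) are rejected.
 */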
1764
1765/**
1766 * ice_vc_config_rss_key
1767 * @vf: pointer to the VF info
1768 * @msg: pointer to the msg buffer
1769 *
1770 * Configure the VF's RSS key
1771 */
1772static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1773{
1774	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1775	struct virtchnl_rss_key *vrk =
1776		(struct virtchnl_rss_key *)msg;
1777	struct ice_pf *pf = vf->pf;
1778	struct ice_vsi *vsi = NULL;
1779
1780	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1781		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1782		goto error_param;
1783	}
1784
1785	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1786		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1787		goto error_param;
1788	}
1789
1790	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1791		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1792		goto error_param;
1793	}
1794
1795	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1796		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1797		goto error_param;
1798	}
1799
1800	vsi = pf->vsi[vf->lan_vsi_idx];
1801	if (!vsi) {
1802		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1803		goto error_param;
1804	}
1805
1806	if (ice_set_rss(vsi, vrk->key, NULL, 0))
1807		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1808error_param:
1809	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1810				     NULL, 0);
1811}
1812
1813/**
1814 * ice_vc_config_rss_lut
1815 * @vf: pointer to the VF info
1816 * @msg: pointer to the msg buffer
1817 *
1818 * Configure the VF's RSS LUT
1819 */
1820static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1821{
1822	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1823	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1824	struct ice_pf *pf = vf->pf;
1825	struct ice_vsi *vsi = NULL;
1826
1827	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1828		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1829		goto error_param;
1830	}
1831
1832	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1833		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1834		goto error_param;
1835	}
1836
1837	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
1838		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1839		goto error_param;
1840	}
1841
1842	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1843		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1844		goto error_param;
1845	}
1846
1847	vsi = pf->vsi[vf->lan_vsi_idx];
1848	if (!vsi) {
1849		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1850		goto error_param;
1851	}
1852
1853	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
1854		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1855error_param:
1856	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1857				     NULL, 0);
1858}
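/* Sizing note: the key and LUT length checks above mean a VF must send
 * exactly ICE_VSIQF_HKEY_ARRAY_SIZE key bytes and ICE_VSIQF_HLUT_ARRAY_SIZE
 * LUT bytes -- the same sizes reported to it in the
 * VIRTCHNL_OP_GET_VF_RESOURCES reply -- or the request is rejected.
 */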
1859
1860/**
1861 * ice_vc_get_stats_msg
1862 * @vf: pointer to the VF info
1863 * @msg: pointer to the msg buffer
1864 *
1865 * called from the VF to get VSI stats
1866 */
1867static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1868{
1869	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1870	struct virtchnl_queue_select *vqs =
1871		(struct virtchnl_queue_select *)msg;
1872	struct ice_pf *pf = vf->pf;
1873	struct ice_eth_stats stats;
1874	struct ice_vsi *vsi;
1875
1876	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1877		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1878		goto error_param;
1879	}
1880
1881	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1882		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1883		goto error_param;
1884	}
1885
1886	vsi = pf->vsi[vf->lan_vsi_idx];
1887	if (!vsi) {
1888		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1889		goto error_param;
1890	}
1891
1892	memset(&stats, 0, sizeof(struct ice_eth_stats));
1893	ice_update_eth_stats(vsi);
1894
1895	stats = vsi->eth_stats;
1896
1897error_param:
1898	/* send the response to the VF */
1899	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1900				     (u8 *)&stats, sizeof(stats));
1901}
1902
1903/**
1904 * ice_vc_ena_qs_msg
1905 * @vf: pointer to the VF info
1906 * @msg: pointer to the msg buffer
1907 *
1908 * called from the VF to enable all or specific queue(s)
1909 */
1910static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1911{
1912	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1913	struct virtchnl_queue_select *vqs =
1914	    (struct virtchnl_queue_select *)msg;
1915	struct ice_pf *pf = vf->pf;
1916	struct ice_vsi *vsi;
1917	unsigned long q_map;
1918	u16 vf_q_id;
1919
1920	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1921		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1922		goto error_param;
1923	}
1924
1925	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1926		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1927		goto error_param;
1928	}
1929
1930	if (!vqs->rx_queues && !vqs->tx_queues) {
1931		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1932		goto error_param;
1933	}
1934
1935	if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
1936	    vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
1937		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1938		goto error_param;
1939	}
1940
1941	vsi = pf->vsi[vf->lan_vsi_idx];
1942	if (!vsi) {
1943		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1944		goto error_param;
1945	}
1946
1947	/* Enable only Rx rings; Tx rings were enabled by the FW when the
1948	 * Tx queue group list was configured and the context bits were
1949	 * programmed using ice_vsi_cfg_txqs
1950	 */
1951	q_map = vqs->rx_queues;
1952	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
1953		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1954			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1955			goto error_param;
1956		}
1957
1958		/* Skip queue if already enabled */
1959		if (test_bit(vf_q_id, vf->rxq_ena))
1960			continue;
1961
1962		if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
1963			dev_err(&vsi->back->pdev->dev,
1964				"Failed to enable Rx ring %d on VSI %d\n",
1965				vf_q_id, vsi->vsi_num);
1966			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1967			goto error_param;
1968		}
1969
1970		set_bit(vf_q_id, vf->rxq_ena);
1971		vf->num_qs_ena++;
1972	}
1973
1974	vsi = pf->vsi[vf->lan_vsi_idx];
1975	q_map = vqs->tx_queues;
1976	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
1977		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1978			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1979			goto error_param;
1980		}
1981
1982		/* Skip queue if enabled */
1983		if (test_bit(vf_q_id, vf->txq_ena))
1984			continue;
1985
1986		set_bit(vf_q_id, vf->txq_ena);
1987		vf->num_qs_ena++;
1988	}
1989
1990	/* Set flag to indicate that queues are enabled */
1991	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1992		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1993
1994error_param:
1995	/* send the response to the VF */
1996	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1997				     NULL, 0);
1998}
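/* Example (illustrative): a VF enabling queues 0 and 2 sends
 * vqs->rx_queues = vqs->tx_queues = 0x5. Each set bit is validated against
 * the VSI, but only the Rx rings are started here; the Tx rings were
 * already armed when the queue contexts were programmed.
 */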
1999
2000/**
2001 * ice_vc_dis_qs_msg
2002 * @vf: pointer to the VF info
2003 * @msg: pointer to the msg buffer
2004 *
2005 * called from the VF to disable all or specific
2006 * queue(s)
2007 */
2008static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2009{
2010	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2011	struct virtchnl_queue_select *vqs =
2012	    (struct virtchnl_queue_select *)msg;
2013	struct ice_pf *pf = vf->pf;
2014	struct ice_vsi *vsi;
2015	unsigned long q_map;
2016	u16 vf_q_id;
2017
2018	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2019	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2020		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2021		goto error_param;
2022	}
2023
2024	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2025		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2026		goto error_param;
2027	}
2028
2029	if (!vqs->rx_queues && !vqs->tx_queues) {
2030		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2031		goto error_param;
2032	}
2033
2034	if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
2035	    vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
2036		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2037		goto error_param;
2038	}
2039
2040	vsi = pf->vsi[vf->lan_vsi_idx];
2041	if (!vsi) {
2042		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2043		goto error_param;
2044	}
2045
2046	if (vqs->tx_queues) {
2047		q_map = vqs->tx_queues;
2048
2049		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2050			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2051			struct ice_txq_meta txq_meta = { 0 };
2052
2053			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2054				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2055				goto error_param;
2056			}
2057
2058			/* Skip queue if not enabled */
2059			if (!test_bit(vf_q_id, vf->txq_ena))
2060				continue;
2061
2062			ice_fill_txq_meta(vsi, ring, &txq_meta);
2063
2064			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2065						 ring, &txq_meta)) {
2066				dev_err(&vsi->back->pdev->dev,
2067					"Failed to stop Tx ring %d on VSI %d\n",
2068					vf_q_id, vsi->vsi_num);
2069				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2070				goto error_param;
2071			}
2072
2073			/* Clear enabled queues flag */
2074			clear_bit(vf_q_id, vf->txq_ena);
2075			vf->num_qs_ena--;
2076		}
2077	}
2078
2079	if (vqs->rx_queues) {
2080		q_map = vqs->rx_queues;
2081
2082		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2083			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2084				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2085				goto error_param;
2086			}
2087
2088			/* Skip queue if not enabled */
2089			if (!test_bit(vf_q_id, vf->rxq_ena))
2090				continue;
2091
2092			if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
2093				dev_err(&vsi->back->pdev->dev,
2094					"Failed to stop Rx ring %d on VSI %d\n",
2095					vf_q_id, vsi->vsi_num);
2096				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2097				goto error_param;
2098			}
2099
2100			/* Clear enabled queues flag */
2101			clear_bit(vf_q_id, vf->rxq_ena);
2102			vf->num_qs_ena--;
2103		}
2104	}
2105
2106	/* Clear the QS_ENA state flag once no queues remain enabled */
2107	if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
2108		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2109
2110error_param:
2111	/* send the response to the VF */
2112	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2113				     NULL, 0);
2114}
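/* Example (illustrative): disabling with vqs->rx_queues = 0x3 stops Rx
 * rings 0 and 1 and clears their rxq_ena bits; once num_qs_ena drops to
 * zero, the ICE_VF_STATE_QS_ENA flag is cleared as well.
 */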
2115
2116/**
2117 * ice_vc_cfg_irq_map_msg
2118 * @vf: pointer to the VF info
2119 * @msg: pointer to the msg buffer
2120 *
2121 * called from the VF to configure the IRQ to queue map
2122 */
2123static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2124{
2125	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2126	struct virtchnl_irq_map_info *irqmap_info;
2127	u16 vsi_id, vsi_q_id, vector_id;
2128	struct virtchnl_vector_map *map;
2129	struct ice_pf *pf = vf->pf;
2130	u16 num_q_vectors_mapped;
2131	struct ice_vsi *vsi;
2132	unsigned long qmap;
2133	int i;
2134
2135	irqmap_info = (struct virtchnl_irq_map_info *)msg;
2136	num_q_vectors_mapped = irqmap_info->num_vectors;
2137
2138	/* Check to make sure number of VF vectors mapped is not greater than
2139	 * number of VF vectors originally allocated, and check that
2140	 * there is actually at least a single VF queue vector mapped
2141	 */
2142	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2143	    pf->num_vf_msix < num_q_vectors_mapped ||
2144	    !irqmap_info->num_vectors) {
2145		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2146		goto error_param;
2147	}
2148
2149	vsi = pf->vsi[vf->lan_vsi_idx];
2150	if (!vsi) {
2151		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2152		goto error_param;
2153	}
2154
2155	for (i = 0; i < num_q_vectors_mapped; i++) {
2156		struct ice_q_vector *q_vector;
2157
2158		map = &irqmap_info->vecmap[i];
2159
2160		vector_id = map->vector_id;
2161		vsi_id = map->vsi_id;
2162		/* validate msg params */
2163		if (!(vector_id < pf->hw.func_caps.common_cap
2164		    .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2165		    (!vector_id && (map->rxq_map || map->txq_map))) {
2166			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2167			goto error_param;
2168		}
2169
2170		/* No need to map VF miscellaneous or rogue vector */
2171		if (!vector_id)
2172			continue;
2173
2174		/* Subtract the non-queue vector count from the vector_id
2175		 * passed by the VF to get the VSI queue vector array index
2176		 */
2177		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2178		if (!q_vector) {
2179			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2180			goto error_param;
2181		}
2182
2183		/* watch out for invalid queue indexes */
2184		qmap = map->rxq_map;
2185		q_vector->num_ring_rx = 0;
2186		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2187			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2188				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2189				goto error_param;
2190			}
2191			q_vector->num_ring_rx++;
2192			q_vector->rx.itr_idx = map->rxitr_idx;
2193			vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2194			ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2195					      q_vector->rx.itr_idx);
2196		}
2197
2198		qmap = map->txq_map;
2199		q_vector->num_ring_tx = 0;
2200		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2201			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2202				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2203				goto error_param;
2204			}
2205			q_vector->num_ring_tx++;
2206			q_vector->tx.itr_idx = map->txitr_idx;
2207			vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2208			ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2209					      q_vector->tx.itr_idx);
2210		}
2211	}
2212
2213error_param:
2214	/* send the response to the VF */
2215	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2216				     NULL, 0);
2217}
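/* Example (illustrative, assuming ICE_NONQ_VECS_VF == 1): a VF mapping
 * queue pair 0 to its first data interrupt sends vector_id = 1 with
 * rxq_map = txq_map = 0x1; vector 0 is the VF's mailbox/miscellaneous
 * vector and is skipped above.
 */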
2218
2219/**
2220 * ice_vc_cfg_qs_msg
2221 * @vf: pointer to the VF info
2222 * @msg: pointer to the msg buffer
2223 *
2224 * called from the VF to configure the Rx/Tx queues
2225 */
2226static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2227{
2228	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2229	struct virtchnl_vsi_queue_config_info *qci =
2230	    (struct virtchnl_vsi_queue_config_info *)msg;
2231	struct virtchnl_queue_pair_info *qpi;
2232	u16 num_rxq = 0, num_txq = 0;
2233	struct ice_pf *pf = vf->pf;
2234	struct ice_vsi *vsi;
2235	int i;
2236
2237	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2238		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2239		goto error_param;
2240	}
2241
2242	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2243		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2244		goto error_param;
2245	}
2246
2247	vsi = pf->vsi[vf->lan_vsi_idx];
2248	if (!vsi) {
2249		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2250		goto error_param;
2251	}
2252
2253	if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
2254	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2255		dev_err(&pf->pdev->dev,
2256			"VF-%d requesting more than supported number of queues: %d\n",
2257			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2258		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2259		goto error_param;
2260	}
2261
2262	for (i = 0; i < qci->num_queue_pairs; i++) {
2263		qpi = &qci->qpair[i];
2264		if (qpi->txq.vsi_id != qci->vsi_id ||
2265		    qpi->rxq.vsi_id != qci->vsi_id ||
2266		    qpi->rxq.queue_id != qpi->txq.queue_id ||
2267		    qpi->txq.headwb_enabled ||
2268		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2269		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2270		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2271			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2272			goto error_param;
2273		}
2274		/* copy Tx queue info from VF into VSI */
2275		if (qpi->txq.ring_len > 0) {
2276			num_txq++;
2277			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2278			vsi->tx_rings[i]->count = qpi->txq.ring_len;
2279		}
2280
2281		/* copy Rx queue info from VF into VSI */
2282		if (qpi->rxq.ring_len > 0) {
2283			num_rxq++;
2284			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2285			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2286
2287			if (qpi->rxq.databuffer_size != 0 &&
2288			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2289			     qpi->rxq.databuffer_size < 1024)) {
2290				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2291				goto error_param;
2292			}
2293			vsi->rx_buf_len = qpi->rxq.databuffer_size;
2294			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2295			if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2296			    qpi->rxq.max_pkt_size < 64) {
2297				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2298				goto error_param;
2299			}
2300		}
2301
2302		vsi->max_frame = qpi->rxq.max_pkt_size;
2303	}
2304
2305	/* The VF can request to configure fewer queues than it was
2306	 * allocated by default, so update the VSI with the new number
2307	 */
2308	vsi->num_txq = num_txq;
2309	vsi->num_rxq = num_rxq;
2310	/* All queues of VF VSI are in TC 0 */
2311	vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2312	vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
2313
2314	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2315		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2316
2317error_param:
2318	/* send the response to the VF */
2319	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2320				     NULL, 0);
2321}
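/* Range example (illustrative): qpi->rxq.databuffer_size = 2048 with
 * max_pkt_size = 1522 passes the checks above, while a 512-byte buffer
 * (below 1024) or a 16384-byte packet size (not below 16 KB) would be
 * rejected.
 */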
2322
2323/**
2324 * ice_is_vf_trusted
2325 * @vf: pointer to the VF info
2326 */
2327static bool ice_is_vf_trusted(struct ice_vf *vf)
2328{
2329	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2330}
2331
2332/**
2333 * ice_can_vf_change_mac
2334 * @vf: pointer to the VF info
2335 *
2336 * Return true if the VF is allowed to change its MAC filters, false otherwise
2337 */
2338static bool ice_can_vf_change_mac(struct ice_vf *vf)
2339{
2340	/* If the VF MAC address has been set administratively (via the
2341	 * ndo_set_vf_mac command), then deny permission to the VF to
2342	 * add/delete unicast MAC addresses, unless the VF is trusted
2343	 */
2344	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2345		return false;
2346
2347	return true;
2348}
2349
2350/**
2351 * ice_vc_handle_mac_addr_msg
2352 * @vf: pointer to the VF info
2353 * @msg: pointer to the msg buffer
2354 * @set: true if MAC filters are being set, false otherwise
2355 *
2356 * add or remove guest MAC address filters
2357 */
2358static int
2359ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
2360{
2361	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2362	struct virtchnl_ether_addr_list *al =
2363	    (struct virtchnl_ether_addr_list *)msg;
2364	struct ice_pf *pf = vf->pf;
2365	enum virtchnl_ops vc_op;
2366	enum ice_status status;
2367	struct ice_vsi *vsi;
2368	int mac_count = 0;
2369	int i;
2370
2371	if (set)
2372		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
2373	else
2374		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
2375
2376	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2377	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2378		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2379		goto handle_mac_exit;
2380	}
2381
2382	if (set && !ice_is_vf_trusted(vf) &&
2383	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
2384		dev_err(&pf->pdev->dev,
2385			"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more filters\n",
2386			vf->vf_id);
2387		/* There is no need to tell the VF why it is not allowed to
2388		 * add more MAC addresses; a plain parameter error is enough.
2389		 */
2390		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2391		goto handle_mac_exit;
2392	}
2393
2394	vsi = pf->vsi[vf->lan_vsi_idx];
2395	if (!vsi) {
2396		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2397		goto handle_mac_exit;
2398	}
2399
2400	for (i = 0; i < al->num_elements; i++) {
2401		u8 *maddr = al->list[i].addr;
2402
2403		if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
2404		    is_broadcast_ether_addr(maddr)) {
2405			if (set) {
2406				/* VF is trying to add filters that the PF
2407				 * already added. Just continue.
2408				 */
2409				dev_info(&pf->pdev->dev,
2410					 "MAC %pM already set for VF %d\n",
2411					 maddr, vf->vf_id);
2412				continue;
2413			} else {
2414				/* VF can't remove dflt_lan_addr/bcast MAC */
2415				dev_err(&pf->pdev->dev,
2416					"VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
2417					maddr, vf->vf_id);
2418				continue;
2419			}
2420		}
2421
2422		/* check for the invalid cases and bail if necessary */
2423		if (is_zero_ether_addr(maddr)) {
2424			dev_err(&pf->pdev->dev,
2425				"invalid MAC %pM provided for VF %d\n",
2426				maddr, vf->vf_id);
2427			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2428			goto handle_mac_exit;
2429		}
2430
2431		if (is_unicast_ether_addr(maddr) &&
2432		    !ice_can_vf_change_mac(vf)) {
2433			dev_err(&pf->pdev->dev,
2434				"can't change unicast MAC for untrusted VF %d\n",
2435				vf->vf_id);
2436			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2437			goto handle_mac_exit;
2438		}
2439
2440		/* program the updated filter list */
2441		status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
2442		if (status == ICE_ERR_DOES_NOT_EXIST ||
2443		    status == ICE_ERR_ALREADY_EXISTS) {
2444			dev_info(&pf->pdev->dev,
2445				 "can't %s MAC filters %pM for VF %d, error %d\n",
2446				 set ? "add" : "remove", maddr, vf->vf_id,
2447				 status);
2448		} else if (status) {
2449			dev_err(&pf->pdev->dev,
2450				"can't %s MAC filters for VF %d, error %d\n",
2451				set ? "add" : "remove", vf->vf_id, status);
2452			v_ret = ice_err_to_virt_err(status);
2453			goto handle_mac_exit;
2454		}
2455
2456		mac_count++;
2457	}
2458
2459	/* Track number of MAC filters programmed for the VF VSI */
2460	if (set)
2461		vf->num_mac += mac_count;
2462	else
2463		vf->num_mac -= mac_count;
2464
2465handle_mac_exit:
2466	/* send the response to the VF */
2467	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2468}
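/* Behavior sketch (illustrative): an untrusted VF may program filters only
 * up to ICE_MAX_MACADDR_PER_VF. Requests to re-add the PF-programmed
 * default or broadcast address are silently skipped, and attempts to
 * delete them are logged and skipped rather than failing the whole list.
 */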
2469
2470/**
2471 * ice_vc_add_mac_addr_msg
2472 * @vf: pointer to the VF info
2473 * @msg: pointer to the msg buffer
2474 *
2475 * add guest MAC address filter
2476 */
2477static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2478{
2479	return ice_vc_handle_mac_addr_msg(vf, msg, true);
2480}
2481
2482/**
2483 * ice_vc_del_mac_addr_msg
2484 * @vf: pointer to the VF info
2485 * @msg: pointer to the msg buffer
2486 *
2487 * remove guest MAC address filter
2488 */
2489static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2490{
2491	return ice_vc_handle_mac_addr_msg(vf, msg, false);
2492}
2493
2494/**
2495 * ice_vc_request_qs_msg
2496 * @vf: pointer to the VF info
2497 * @msg: pointer to the msg buffer
2498 *
2499 * VFs get a default number of queues but can use this message to request a
2500 * different number. If the request is successful, PF will reset the VF and
2501 * return 0. If unsuccessful, PF will respond with a virtchnl message that
2502 * informs the VF of the number of available queue pairs.
2503 */
2504static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2505{
2506	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2507	struct virtchnl_vf_res_request *vfres =
2508		(struct virtchnl_vf_res_request *)msg;
2509	u16 req_queues = vfres->num_queue_pairs;
2510	struct ice_pf *pf = vf->pf;
2511	u16 max_allowed_vf_queues;
2512	u16 tx_rx_queue_left;
2513	u16 cur_queues;
2514
2515	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2516		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2517		goto error_param;
2518	}
2519
2520	cur_queues = vf->num_vf_qs;
2521	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2522				 ice_get_avail_rxq_count(pf));
2523	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
2524	if (!req_queues) {
2525		dev_err(&pf->pdev->dev,
2526			"VF %d tried to request 0 queues. Ignoring.\n",
2527			vf->vf_id);
2528	} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
2529		dev_err(&pf->pdev->dev,
2530			"VF %d tried to request more than %d queues.\n",
2531			vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
2532		vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
2533	} else if (req_queues > cur_queues &&
2534		   req_queues - cur_queues > tx_rx_queue_left) {
2535		dev_warn(&pf->pdev->dev,
2536			 "VF %d requested %u more queues, but only %u left.\n",
2537			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2538		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
2539					       ICE_MAX_BASE_QS_PER_VF);
2540	} else {
2541		/* request is successful, then reset VF */
2542		vf->num_req_qs = req_queues;
2543		ice_vc_dis_vf(vf);
2544		dev_info(&pf->pdev->dev,
2545			 "VF %d granted request of %u queues.\n",
2546			 vf->vf_id, req_queues);
2547		return 0;
2548	}
2549
2550error_param:
2551	/* send the response to the VF */
2552	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2553				     v_ret, (u8 *)vfres, sizeof(*vfres));
2554}
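/* Example (illustrative): a VF that currently owns 4 queue pairs and asks
 * for 8 when only 2 are free PF-wide gets a reply capped at
 * min(4 + 2, ICE_MAX_BASE_QS_PER_VF); a request that can be satisfied
 * instead triggers a VF reset and returns without a reply here.
 */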
2555
2556/**
2557 * ice_set_vf_port_vlan
2558 * @netdev: network interface device structure
2559 * @vf_id: VF identifier
2560 * @vlan_id: VLAN ID being set
2561 * @qos: priority setting
2562 * @vlan_proto: VLAN protocol
2563 *
2564 * program VF Port VLAN ID and/or QoS
2565 */
2566int
2567ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
2568		     __be16 vlan_proto)
2569{
2570	u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
2571	struct ice_netdev_priv *np = netdev_priv(netdev);
2572	struct ice_pf *pf = np->vsi->back;
2573	struct ice_vsi *vsi;
2574	struct ice_vf *vf;
2575	int ret = 0;
2576
2577	/* validate the request */
2578	if (vf_id >= pf->num_alloc_vfs) {
2579		dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
2580		return -EINVAL;
2581	}
2582
2583	if (vlan_id > ICE_MAX_VLANID || qos > 7) {
2584		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2585		return -EINVAL;
2586	}
2587
2588	if (vlan_proto != htons(ETH_P_8021Q)) {
2589		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
2590		return -EPROTONOSUPPORT;
2591	}
2592
2593	vf = &pf->vf[vf_id];
2594	vsi = pf->vsi[vf->lan_vsi_idx];
2595	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2596		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
2597		return -EBUSY;
2598	}
2599
2600	if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
2601		/* duplicate request, so just return success */
2602		dev_info(&pf->pdev->dev,
2603			 "Duplicate pvid %d request\n", vlanprio);
2604		return ret;
2605	}
2606
2607	/* If PVID, then remove all filters on the old VLAN */
2608	if (vsi->info.pvid)
2609		ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2610				  VLAN_VID_MASK));
2611
2612	if (vlan_id || qos) {
2613		ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
2614		if (ret)
2615			goto error_set_pvid;
2616	} else {
2617		ice_vsi_manage_pvid(vsi, 0, false);
2618		vsi->info.pvid = 0;
2619	}
2620
2621	if (vlan_id) {
2622		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2623			 vlan_id, qos, vf_id);
2624
2625		/* add new VLAN filter for each MAC */
2626		ret = ice_vsi_add_vlan(vsi, vlan_id);
2627		if (ret)
2628			goto error_set_pvid;
2629	}
2630
2631	/* The Port VLAN needs to be saved across resets the same as the
2632	 * default LAN MAC address.
2633	 */
2634	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2635
2636error_set_pvid:
2637	return ret;
2638}
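/* Usage sketch (host side, illustrative; "eth0" is a placeholder):
 *   ip link set eth0 vf 0 vlan 100 qos 3
 * arrives here as vlan_id = 100 and qos = 3, which are folded into
 * vlanprio = 100 | (3 << ICE_VLAN_PRIORITY_S) and programmed as the
 * port VLAN (PVID) on the VF's VSI.
 */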
2639
2640/**
2641 * ice_vc_process_vlan_msg
2642 * @vf: pointer to the VF info
2643 * @msg: pointer to the msg buffer
2644 * @add_v: Add VLAN if true, otherwise delete VLAN
2645 *
2646 * Process virtchnl op to add or remove programmed guest VLAN ID
2647 */
2648static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2649{
2650	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2651	struct virtchnl_vlan_filter_list *vfl =
2652	    (struct virtchnl_vlan_filter_list *)msg;
2653	struct ice_pf *pf = vf->pf;
2654	bool vlan_promisc = false;
2655	struct ice_vsi *vsi;
2656	struct ice_hw *hw;
2657	int status = 0;
2658	u8 promisc_m;
2659	int i;
2660
2661	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2662		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2663		goto error_param;
2664	}
2665
2666	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2667		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2668		goto error_param;
2669	}
2670
2671	if (add_v && !ice_is_vf_trusted(vf) &&
2672	    vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2673		dev_info(&pf->pdev->dev,
2674			 "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN IDs\n",
2675			 vf->vf_id);
2676		/* There is no need to let the VF know about not being
2677		 * trusted, so we can just return a success message here
2678		 */
2679		goto error_param;
2680	}
2681
2682	for (i = 0; i < vfl->num_elements; i++) {
2683		if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
2684			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2685			dev_err(&pf->pdev->dev,
2686				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2687			goto error_param;
2688		}
2689	}
2690
2691	hw = &pf->hw;
2692	vsi = pf->vsi[vf->lan_vsi_idx];
2693	if (!vsi) {
2694		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2695		goto error_param;
2696	}
2697
2698	if (vsi->info.pvid) {
2699		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2700		goto error_param;
2701	}
2702
2703	if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
2704		dev_err(&pf->pdev->dev,
2705			"%sable VLAN stripping failed for VSI %i\n",
2706			 add_v ? "en" : "dis", vsi->vsi_num);
2707		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2708		goto error_param;
2709	}
2710
2711	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2712	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2713		vlan_promisc = true;
2714
2715	if (add_v) {
2716		for (i = 0; i < vfl->num_elements; i++) {
2717			u16 vid = vfl->vlan_id[i];
2718
2719			if (!ice_is_vf_trusted(vf) &&
2720			    vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2721				dev_info(&pf->pdev->dev,
2722					 "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN IDs\n",
2723					 vf->vf_id);
2724				/* There is no need to let the VF know about
2725				 * not being trusted, so we can just return a
2726				 * success message here as well.
2727				 */
2728				goto error_param;
2729			}
2730
2731			if (ice_vsi_add_vlan(vsi, vid)) {
2732				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2733				goto error_param;
2734			}
2735
2736			vf->num_vlan++;
2737			/* Enable VLAN pruning when VLAN is added */
2738			if (!vlan_promisc) {
2739				status = ice_cfg_vlan_pruning(vsi, true, false);
2740				if (status) {
2741					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2742					dev_err(&pf->pdev->dev,
2743						"Enable VLAN pruning on VLAN ID %d failed, error %d\n",
2744						vid, status);
2745					goto error_param;
2746				}
2747			} else {
2748				/* Enable Ucast/Mcast VLAN promiscuous mode */
2749				promisc_m = ICE_PROMISC_VLAN_TX |
2750					    ICE_PROMISC_VLAN_RX;
2751
2752				status = ice_set_vsi_promisc(hw, vsi->idx,
2753							     promisc_m, vid);
2754				if (status) {
2755					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2756					dev_err(&pf->pdev->dev,
2757						"Enable Unicast/Multicast promiscuous mode on VLAN ID %d failed, error %d\n",
2758						vid, status);
2759				}
2760			}
2761		}
2762	} else {
2763		/* In case of a non-trusted VF, the number of VLAN elements
2764		 * passed to the PF for removal might be greater than the
2765		 * number of VLAN filters programmed for that VF. So use the
2766		 * actual number of VLANs added earlier with the add VLAN
2767		 * opcode, to avoid removing a VLAN that doesn't exist and
2768		 * sending an erroneous failure message back to the VF.
2769		 */
2770		int num_vf_vlan;
2771
2772		num_vf_vlan = vf->num_vlan;
2773		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2774			u16 vid = vfl->vlan_id[i];
2775
2776			/* Make sure ice_vsi_kill_vlan is successful before
2777			 * updating VLAN information
2778			 */
2779			if (ice_vsi_kill_vlan(vsi, vid)) {
2780				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2781				goto error_param;
2782			}
2783
2784			vf->num_vlan--;
2785			/* Disable VLAN pruning when the last VLAN is removed */
2786			if (!vf->num_vlan)
2787				ice_cfg_vlan_pruning(vsi, false, false);
2788
2789			/* Disable Unicast/Multicast VLAN promiscuous mode */
2790			if (vlan_promisc) {
2791				promisc_m = ICE_PROMISC_VLAN_TX |
2792					    ICE_PROMISC_VLAN_RX;
2793
2794				ice_clear_vsi_promisc(hw, vsi->idx,
2795						      promisc_m, vid);
2796			}
2797		}
2798	}
2799
2800error_param:
2801	/* send the response to the VF */
2802	if (add_v)
2803		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2804					     NULL, 0);
2805	else
2806		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2807					     NULL, 0);
2808}
2809
2810/**
2811 * ice_vc_add_vlan_msg
2812 * @vf: pointer to the VF info
2813 * @msg: pointer to the msg buffer
2814 *
2815 * Add and program guest VLAN ID
2816 */
2817static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2818{
2819	return ice_vc_process_vlan_msg(vf, msg, true);
2820}
2821
2822/**
2823 * ice_vc_remove_vlan_msg
2824 * @vf: pointer to the VF info
2825 * @msg: pointer to the msg buffer
2826 *
2827 * remove programmed guest VLAN ID
2828 */
2829static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2830{
2831	return ice_vc_process_vlan_msg(vf, msg, false);
2832}
2833
2834/**
2835 * ice_vc_ena_vlan_stripping
2836 * @vf: pointer to the VF info
2837 *
2838 * Enable VLAN header stripping for a given VF
2839 */
2840static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2841{
2842	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2843	struct ice_pf *pf = vf->pf;
2844	struct ice_vsi *vsi;
2845
2846	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2847		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2848		goto error_param;
2849	}
2850
2851	vsi = pf->vsi[vf->lan_vsi_idx];
2852	if (ice_vsi_manage_vlan_stripping(vsi, true))
2853		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2854
2855error_param:
2856	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2857				     v_ret, NULL, 0);
2858}
2859
2860/**
2861 * ice_vc_dis_vlan_stripping
2862 * @vf: pointer to the VF info
2863 *
2864 * Disable VLAN header stripping for a given VF
2865 */
2866static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2867{
2868	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2869	struct ice_pf *pf = vf->pf;
2870	struct ice_vsi *vsi;
2871
2872	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2873		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2874		goto error_param;
2875	}
2876
2877	vsi = pf->vsi[vf->lan_vsi_idx];
2878	if (!vsi) {
2879		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2880		goto error_param;
2881	}
2882
2883	if (ice_vsi_manage_vlan_stripping(vsi, false))
2884		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2885
2886error_param:
2887	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2888				     v_ret, NULL, 0);
2889}
2890
2891/**
2892 * ice_vc_process_vf_msg - Process request from VF
2893 * @pf: pointer to the PF structure
2894 * @event: pointer to the AQ event
2895 *
2896 * called from the common asq/arq handler to
2897 * process request from VF
2898 */
2899void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
2900{
2901	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
2902	s16 vf_id = le16_to_cpu(event->desc.retval);
2903	u16 msglen = event->msg_len;
2904	u8 *msg = event->msg_buf;
2905	struct ice_vf *vf = NULL;
2906	int err = 0;
2907
2908	if (vf_id >= pf->num_alloc_vfs) {
2909		err = -EINVAL;
2910		goto error_handler;
2911	}
2912
2913	vf = &pf->vf[vf_id];
2914
2915	/* Check if VF is disabled. */
2916	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
2917		err = -EPERM;
2918		goto error_handler;
2919	}
2920
2921	/* Perform basic checks on the msg */
2922	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2923	if (err) {
2924		if (err == VIRTCHNL_STATUS_ERR_PARAM)
2925			err = -EPERM;
2926		else
2927			err = -EINVAL;
2928	}
2929
2930error_handler:
2931	if (err) {
2932		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
2933				      NULL, 0);
2934		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
2935			vf_id, v_opcode, msglen, err);
2936		return;
2937	}
2938
2939	switch (v_opcode) {
2940	case VIRTCHNL_OP_VERSION:
2941		err = ice_vc_get_ver_msg(vf, msg);
2942		break;
2943	case VIRTCHNL_OP_GET_VF_RESOURCES:
2944		err = ice_vc_get_vf_res_msg(vf, msg);
2945		ice_vc_notify_vf_link_state(vf);
2946		break;
2947	case VIRTCHNL_OP_RESET_VF:
2948		ice_vc_reset_vf_msg(vf);
2949		break;
2950	case VIRTCHNL_OP_ADD_ETH_ADDR:
2951		err = ice_vc_add_mac_addr_msg(vf, msg);
2952		break;
2953	case VIRTCHNL_OP_DEL_ETH_ADDR:
2954		err = ice_vc_del_mac_addr_msg(vf, msg);
2955		break;
2956	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2957		err = ice_vc_cfg_qs_msg(vf, msg);
2958		break;
2959	case VIRTCHNL_OP_ENABLE_QUEUES:
2960		err = ice_vc_ena_qs_msg(vf, msg);
2961		ice_vc_notify_vf_link_state(vf);
2962		break;
2963	case VIRTCHNL_OP_DISABLE_QUEUES:
2964		err = ice_vc_dis_qs_msg(vf, msg);
2965		break;
2966	case VIRTCHNL_OP_REQUEST_QUEUES:
2967		err = ice_vc_request_qs_msg(vf, msg);
2968		break;
2969	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2970		err = ice_vc_cfg_irq_map_msg(vf, msg);
2971		break;
2972	case VIRTCHNL_OP_CONFIG_RSS_KEY:
2973		err = ice_vc_config_rss_key(vf, msg);
2974		break;
2975	case VIRTCHNL_OP_CONFIG_RSS_LUT:
2976		err = ice_vc_config_rss_lut(vf, msg);
2977		break;
2978	case VIRTCHNL_OP_GET_STATS:
2979		err = ice_vc_get_stats_msg(vf, msg);
2980		break;
2981	case VIRTCHNL_OP_ADD_VLAN:
2982		err = ice_vc_add_vlan_msg(vf, msg);
2983		break;
2984	case VIRTCHNL_OP_DEL_VLAN:
2985		err = ice_vc_remove_vlan_msg(vf, msg);
2986		break;
2987	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2988		err = ice_vc_ena_vlan_stripping(vf);
2989		break;
2990	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2991		err = ice_vc_dis_vlan_stripping(vf);
2992		break;
2993	case VIRTCHNL_OP_UNKNOWN:
2994	default:
2995		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2996			v_opcode, vf_id);
2997		err = ice_vc_send_msg_to_vf(vf, v_opcode,
2998					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
2999					    NULL, 0);
3000		break;
3001	}
3002	if (err) {
3003		/* Only log the failure here; the opcode handlers have already
3004		 * sent their responses to the VF.
3005		 */
3006		dev_info(&pf->pdev->dev,
3007			 "PF failed to honor VF %d, opcode %d, error %d\n",
3008			 vf_id, v_opcode, err);
3009	}
3010}
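/* Dispatch note (illustrative): the opcode travels in the AdminQ
 * descriptor's cookie_high field and the VF ID in desc.retval, so even an
 * unknown opcode is answered with VIRTCHNL_STATUS_ERR_NOT_SUPPORTED rather
 * than dropped, keeping the VF's request/response pairing intact.
 */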
3011
3012/**
3013 * ice_get_vf_cfg
3014 * @netdev: network interface device structure
3015 * @vf_id: VF identifier
3016 * @ivi: VF configuration structure
3017 *
3018 * return VF configuration
3019 */
3020int
3021ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3022{
3023	struct ice_netdev_priv *np = netdev_priv(netdev);
3024	struct ice_vsi *vsi = np->vsi;
3025	struct ice_pf *pf = vsi->back;
3026	struct ice_vf *vf;
3027
3028	/* validate the request */
3029	if (vf_id >= pf->num_alloc_vfs) {
3030		netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3031		return -EINVAL;
3032	}
3033
3034	vf = &pf->vf[vf_id];
3035	vsi = pf->vsi[vf->lan_vsi_idx];
3036
3037	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3038		netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3039		return -EBUSY;
3040	}
3041
3042	ivi->vf = vf_id;
3043	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3044
3045	/* VF configuration for VLAN and applicable QoS */
3046	ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
3047	ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
3048		    ICE_VLAN_PRIORITY_S;
3049
3050	ivi->trusted = vf->trusted;
3051	ivi->spoofchk = vf->spoofchk;
3052	if (!vf->link_forced)
3053		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3054	else if (vf->link_up)
3055		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3056	else
3057		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3058	ivi->max_tx_rate = vf->tx_rate;
3059	ivi->min_tx_rate = 0;
3060	return 0;
3061}
3062
3063/**
3064 * ice_set_vf_spoofchk
3065 * @netdev: network interface device structure
3066 * @vf_id: VF identifier
3067 * @ena: flag to enable or disable feature
3068 *
3069 * Enable or disable VF spoof checking
3070 */
3071int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
3072{
3073	struct ice_netdev_priv *np = netdev_priv(netdev);
3074	struct ice_vsi *vsi = np->vsi;
3075	struct ice_pf *pf = vsi->back;
3076	struct ice_vsi_ctx *ctx;
3077	enum ice_status status;
3078	struct ice_vf *vf;
3079	int ret = 0;
3080
3081	/* validate the request */
3082	if (vf_id >= pf->num_alloc_vfs) {
3083		netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3084		return -EINVAL;
3085	}
3086
3087	vf = &pf->vf[vf_id];
3088	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3089		netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3090		return -EBUSY;
3091	}
3092
3093	if (ena == vf->spoofchk) {
3094		dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
3095			ena ? "ON" : "OFF");
3096		return 0;
3097	}
3098
3099	ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
3100	if (!ctx)
3101		return -ENOMEM;
3102
3103	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
3104
3105	if (ena) {
3106		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
3107		ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
3108	}
3109
3110	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3111	if (status) {
3112		dev_dbg(&pf->pdev->dev,
3113			"Error %d, failed to update VSI parameters\n", status);
3114		ret = -EIO;
3115		goto out;
3116	}
3117
3118	vf->spoofchk = ena;
3119	vsi->info.sec_flags = ctx->info.sec_flags;
3120	vsi->info.sw_flags2 = ctx->info.sw_flags2;
3121out:
3122	devm_kfree(&pf->pdev->dev, ctx);
3123	return ret;
3124}
3125
3126/**
3127 * ice_set_vf_mac
3128 * @netdev: network interface device structure
3129 * @vf_id: VF identifier
3130 * @mac: MAC address
3131 *
3132 * program VF MAC address
3133 */
3134int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3135{
3136	struct ice_netdev_priv *np = netdev_priv(netdev);
3137	struct ice_vsi *vsi = np->vsi;
3138	struct ice_pf *pf = vsi->back;
3139	struct ice_vf *vf;
3140	int ret = 0;
3141
3142	/* validate the request */
3143	if (vf_id >= pf->num_alloc_vfs) {
3144		netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3145		return -EINVAL;
3146	}
3147
3148	vf = &pf->vf[vf_id];
3149	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3150		netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3151		return -EBUSY;
3152	}
3153
3154	if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3155		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3156		return -EINVAL;
3157	}
3158
3159	/* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
3160	 * flow will use the updated dflt_lan_addr and add a MAC filter
3161	 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
3162	 * set the MAC address for this VF.
3163	 */
3164	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3165	vf->pf_set_mac = true;
3166	netdev_info(netdev,
3167		    "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
3168		    vf_id, mac);
3169
3170	ice_vc_dis_vf(vf);
3171	return ret;
3172}
3173
3174/**
3175 * ice_set_vf_trust
3176 * @netdev: network interface device structure
3177 * @vf_id: VF identifier
3178 * @trusted: Boolean value to enable/disable trusted VF
3179 *
3180 * Enable or disable a given VF as trusted
3181 */
3182int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3183{
3184	struct ice_netdev_priv *np = netdev_priv(netdev);
3185	struct ice_vsi *vsi = np->vsi;
3186	struct ice_pf *pf = vsi->back;
3187	struct ice_vf *vf;
3188
3189	/* validate the request */
3190	if (vf_id >= pf->num_alloc_vfs) {
3191		dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
3192		return -EINVAL;
3193	}
3194
3195	vf = &pf->vf[vf_id];
3196	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3197		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3198		return -EBUSY;
3199	}
3200
3201	/* Check if already trusted */
3202	if (trusted == vf->trusted)
3203		return 0;
3204
3205	vf->trusted = trusted;
3206	ice_vc_dis_vf(vf);
3207	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
3208		 vf_id, trusted ? "" : "un");
3209
3210	return 0;
3211}
3212
3213/**
3214 * ice_set_vf_link_state
3215 * @netdev: network interface device structure
3216 * @vf_id: VF identifier
3217 * @link_state: required link state
3218 *
3219 * Set VF's link state, irrespective of physical link state status
3220 */
3221int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3222{
3223	struct ice_netdev_priv *np = netdev_priv(netdev);
3224	struct ice_pf *pf = np->vsi->back;
3225	struct virtchnl_pf_event pfe = { 0 };
3226	struct ice_link_status *ls;
3227	struct ice_vf *vf;
3228	struct ice_hw *hw;
3229
3230	if (vf_id >= pf->num_alloc_vfs) {
3231		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3232		return -EINVAL;
3233	}
3234
3235	vf = &pf->vf[vf_id];
3236	hw = &pf->hw;
3237	ls = &pf->hw.port_info->phy.link_info;
3238
3239	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3240		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3241		return -EBUSY;
3242	}
3243
3244	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3245	pfe.severity = PF_EVENT_SEVERITY_INFO;
3246
3247	switch (link_state) {
3248	case IFLA_VF_LINK_STATE_AUTO:
3249		vf->link_forced = false;
3250		vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
3251		break;
3252	case IFLA_VF_LINK_STATE_ENABLE:
3253		vf->link_forced = true;
3254		vf->link_up = true;
3255		break;
3256	case IFLA_VF_LINK_STATE_DISABLE:
3257		vf->link_forced = true;
3258		vf->link_up = false;
3259		break;
3260	default:
3261		return -EINVAL;
3262	}
3263
3264	if (vf->link_forced)
3265		ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
3266	else
3267		ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
3268
3269	/* Notify the VF of its new link state */
3270	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
3271			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
3272			      sizeof(pfe), NULL);
3273
3274	return 0;
3275}
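/* Usage sketch (host side, illustrative; "eth0" is a placeholder):
 *   ip link set eth0 vf 0 state disable
 * forces the VF link down regardless of physical link state; the VF is
 * notified immediately through a VIRTCHNL_OP_EVENT message.
 */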
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_base.h"
   6#include "ice_lib.h"
   7#include "ice_fltr.h"
   8
   9/**
  10 * ice_validate_vf_id - helper to check if VF ID is valid
  11 * @pf: pointer to the PF structure
  12 * @vf_id: the ID of the VF to check
  13 */
  14static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
  15{
  16	/* vf_id range is only valid for 0-255, and should always be unsigned */
  17	if (vf_id >= pf->num_alloc_vfs) {
  18		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
  19		return -EINVAL;
  20	}
  21	return 0;
  22}
  23
  24/**
  25 * ice_check_vf_init - helper to check if VF init complete
  26 * @pf: pointer to the PF structure
  27 * @vf: the pointer to the VF to check
  28 */
  29static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
  30{
  31	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
  32		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
  33			vf->vf_id);
  34		return -EBUSY;
  35	}
  36	return 0;
  37}
  38
  39/**
  40 * ice_err_to_virt_err - translate errors for VF return code
  41 * @ice_err: error return code
  42 */
  43static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
  44{
  45	switch (ice_err) {
  46	case ICE_SUCCESS:
  47		return VIRTCHNL_STATUS_SUCCESS;
  48	case ICE_ERR_BAD_PTR:
  49	case ICE_ERR_INVAL_SIZE:
  50	case ICE_ERR_DEVICE_NOT_SUPPORTED:
  51	case ICE_ERR_PARAM:
  52	case ICE_ERR_CFG:
  53		return VIRTCHNL_STATUS_ERR_PARAM;
  54	case ICE_ERR_NO_MEMORY:
  55		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
  56	case ICE_ERR_NOT_READY:
  57	case ICE_ERR_RESET_FAILED:
  58	case ICE_ERR_FW_API_VER:
  59	case ICE_ERR_AQ_ERROR:
  60	case ICE_ERR_AQ_TIMEOUT:
  61	case ICE_ERR_AQ_FULL:
  62	case ICE_ERR_AQ_NO_WORK:
  63	case ICE_ERR_AQ_EMPTY:
  64		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
  65	default:
  66		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
  67	}
  68}
  69
  70/**
  71 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
  72 * @pf: pointer to the PF structure
  73 * @v_opcode: operation code
  74 * @v_retval: return value
  75 * @msg: pointer to the msg buffer
  76 * @msglen: msg length
  77 */
  78static void
  79ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
  80		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
  81{
  82	struct ice_hw *hw = &pf->hw;
  83	unsigned int i;
  84
  85	ice_for_each_vf(pf, i) {
  86		struct ice_vf *vf = &pf->vf[i];
  87
 
  88		/* Not all vfs are enabled so skip the ones that are not */
  89		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
  90		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
  91			continue;
  92
  93		/* Ignore return value on purpose - a given VF may fail, but
  94		 * we need to keep going and send to all of them
  95		 */
  96		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
  97				      msglen, NULL);
  98	}
  99}
 100
 101/**
 102 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 103 * @vf: pointer to the VF structure
 104 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 105 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 106 * @link_up: whether or not to set the link up/down
 107 */
 108static void
 109ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
 110		 int ice_link_speed, bool link_up)
 111{
 112	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
 113		pfe->event_data.link_event_adv.link_status = link_up;
 114		/* Speed in Mbps */
 115		pfe->event_data.link_event_adv.link_speed =
 116			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
 117	} else {
 118		pfe->event_data.link_event.link_status = link_up;
 119		/* Legacy method for virtchnl link speeds */
 120		pfe->event_data.link_event.link_speed =
 121			(enum virtchnl_link_speed)
 122			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
 123	}
 124}
 125
 126/**
 127 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 128 * @vf: the VF to check
 129 *
 130 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 131 * otherwise
 132 */
 133static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
 
 
 134{
 135	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
 136		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
 137}
 138
 139/**
 140 * ice_is_vf_link_up - check if the VF's link is up
 141 * @vf: VF to check if link is up
 142 */
 143static bool ice_is_vf_link_up(struct ice_vf *vf)
 144{
 145	struct ice_pf *pf = vf->pf;
 146
 147	if (ice_check_vf_init(pf, vf))
 148		return false;
 149
 150	if (ice_vf_has_no_qs_ena(vf))
 151		return false;
 152	else if (vf->link_forced)
 153		return vf->link_up;
 154	else
 155		return pf->hw.port_info->phy.link_info.link_info &
 156			ICE_AQ_LINK_UP;
 157}
 158
 159/**
 160 * ice_vc_notify_vf_link_state - Inform a VF of link status
 161 * @vf: pointer to the VF structure
 162 *
 163 * send a link status message to a single VF
 164 */
 165static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
 166{
 167	struct virtchnl_pf_event pfe = { 0 };
 168	struct ice_hw *hw = &vf->pf->hw;
 
 
 
 
 
 169
 170	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 171	pfe.severity = PF_EVENT_SEVERITY_INFO;
 172
 173	if (ice_is_vf_link_up(vf))
 174		ice_set_pfe_link(vf, &pfe,
 175				 hw->port_info->phy.link_info.link_speed, true);
 
 
 176	else
 177		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
 
 178
 179	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
 180			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
 181			      sizeof(pfe), NULL);
 182}
 183
 184/**
 185 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 186 * @vf: VF to remove access to VSI for
 187 */
 188static void ice_vf_invalidate_vsi(struct ice_vf *vf)
 189{
 190	vf->lan_vsi_idx = ICE_NO_VSI;
 191	vf->lan_vsi_num = ICE_NO_VSI;
 192}
 193
 194/**
 195 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 196 * @vf: invalidate this VF's VSI after freeing it
 197 */
 198static void ice_vf_vsi_release(struct ice_vf *vf)
 199{
 200	ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
 201	ice_vf_invalidate_vsi(vf);
 202}
 203
 204/**
 205 * ice_free_vf_res - Free a VF's resources
 206 * @vf: pointer to the VF info
 207 */
 208static void ice_free_vf_res(struct ice_vf *vf)
 209{
 210	struct ice_pf *pf = vf->pf;
 211	int i, last_vector_idx;
 212
 213	/* First, disable VF's configuration API to prevent OS from
 214	 * accessing the VF's VSI after it's freed or invalidated.
 215	 */
 216	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 217
 218	/* free VSI and disconnect it from the parent uplink */
 219	if (vf->lan_vsi_idx != ICE_NO_VSI) {
 220		ice_vf_vsi_release(vf);
 
 
 221		vf->num_mac = 0;
 222	}
 223
 224	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
 225
 226	/* clear VF MDD event information */
 227	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
 228	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
 229
 230	/* Disable interrupts so that VF starts in a known state */
 231	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
 232		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
 233		ice_flush(&pf->hw);
 234	}
 235	/* reset some of the state variables keeping track of the resources */
 236	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 237	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 238}
 239
 240/**
 241 * ice_dis_vf_mappings
 242 * @vf: pointer to the VF structure
 243 */
 244static void ice_dis_vf_mappings(struct ice_vf *vf)
 245{
 246	struct ice_pf *pf = vf->pf;
 247	struct ice_vsi *vsi;
 248	struct device *dev;
 249	int first, last, v;
 250	struct ice_hw *hw;
 251
 252	hw = &pf->hw;
 253	vsi = pf->vsi[vf->lan_vsi_idx];
 254
 255	dev = ice_pf_to_dev(pf);
 256	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
 257	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
 258
 259	first = vf->first_vector_idx;
 260	last = first + pf->num_msix_per_vf - 1;
 261	for (v = first; v <= last; v++) {
 262		u32 reg;
 263
 264		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
 265			GLINT_VECT2FUNC_IS_PF_M) |
 266		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 267			GLINT_VECT2FUNC_PF_NUM_M));
 268		wr32(hw, GLINT_VECT2FUNC(v), reg);
 269	}
 270
 271	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
 272		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
 273	else
 274		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
 275
 276	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
 277		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
 278	else
 279		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
 280}
 281
 282/**
 283 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 284 * @pf: pointer to the PF structure
 285 *
  286 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
  287 * the pf->sriov_base_vector.
 288 *
 289 * Returns 0 on success, and -EINVAL on error.
 290 */
 291static int ice_sriov_free_msix_res(struct ice_pf *pf)
 292{
 293	struct ice_res_tracker *res;
 294
 295	if (!pf)
 296		return -EINVAL;
 297
 298	res = pf->irq_tracker;
 299	if (!res)
 300		return -EINVAL;
 301
 302	/* give back irq_tracker resources used */
 303	WARN_ON(pf->sriov_base_vector < res->num_entries);
 304
 305	pf->sriov_base_vector = 0;
 306
 307	return 0;
 308}
 309
 310/**
 311 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 312 * @vf: pointer to the VF structure
 313 */
 314void ice_set_vf_state_qs_dis(struct ice_vf *vf)
 315{
 316	/* Clear Rx/Tx enabled queues flag */
 317	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
 318	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
 319	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
 320}
 321
 322/**
 323 * ice_dis_vf_qs - Disable the VF queues
 324 * @vf: pointer to the VF structure
 325 */
 326static void ice_dis_vf_qs(struct ice_vf *vf)
 327{
 328	struct ice_pf *pf = vf->pf;
 329	struct ice_vsi *vsi;
 330
 331	vsi = pf->vsi[vf->lan_vsi_idx];
 332
 333	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
 334	ice_vsi_stop_all_rx_rings(vsi);
 335	ice_set_vf_state_qs_dis(vf);
 336}
 337
 338/**
 339 * ice_free_vfs - Free all VFs
 340 * @pf: pointer to the PF structure
 341 */
 342void ice_free_vfs(struct ice_pf *pf)
 343{
 344	struct device *dev = ice_pf_to_dev(pf);
 345	struct ice_hw *hw = &pf->hw;
 346	unsigned int tmp, i;
 347
 348	if (!pf->vf)
 349		return;
 350
 351	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
 352		usleep_range(1000, 2000);
 353
 354	/* Disable IOV before freeing resources. This lets any VF drivers
 355	 * running in the host get themselves cleaned up before we yank
 356	 * the carpet out from underneath their feet.
 357	 */
 358	if (!pci_vfs_assigned(pf->pdev))
 359		pci_disable_sriov(pf->pdev);
 360	else
 361		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 362
 363	/* Avoid wait time by stopping all VFs at the same time */
 364	ice_for_each_vf(pf, i)
 365		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
 366			ice_dis_vf_qs(&pf->vf[i]);
 367
 368	tmp = pf->num_alloc_vfs;
 369	pf->num_qps_per_vf = 0;
 370	pf->num_alloc_vfs = 0;
 371	for (i = 0; i < tmp; i++) {
 372		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
 373			/* disable VF qp mappings and set VF disable state */
 374			ice_dis_vf_mappings(&pf->vf[i]);
 375			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
 376			ice_free_vf_res(&pf->vf[i]);
 377		}
 378	}
 379
 380	if (ice_sriov_free_msix_res(pf))
 381		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
 382
 383	devm_kfree(dev, pf->vf);
 384	pf->vf = NULL;
 385
 386	/* This check is for when the driver is unloaded while VFs are
 387	 * assigned. Setting the number of VFs to 0 through sysfs is caught
 388	 * before this function ever gets called.
 389	 */
 390	if (!pci_vfs_assigned(pf->pdev)) {
 391		unsigned int vf_id;
 392
 393		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
 394		 * work correctly when SR-IOV gets re-enabled.
 395		 */
 396		for (vf_id = 0; vf_id < tmp; vf_id++) {
 397			u32 reg_idx, bit_idx;
 398
 399			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 400			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 401			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 402		}
 403	}
 404	clear_bit(__ICE_VF_DIS, pf->state);
 405	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 406}
 407
 408/**
 409 * ice_trigger_vf_reset - Reset a VF on HW
 410 * @vf: pointer to the VF structure
 411 * @is_vflr: true if VFLR was issued, false if not
 412 * @is_pfr: true if the reset was triggered due to a previous PFR
 413 *
 414 * Trigger hardware to start a reset for a particular VF. Expects the caller
 415 * to wait the proper amount of time to allow hardware to reset the VF before
 416 * it cleans up and restores VF functionality.
 417 */
 418static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 419{
 420	struct ice_pf *pf = vf->pf;
 421	u32 reg, reg_idx, bit_idx;
 422	unsigned int vf_abs_id, i;
 423	struct device *dev;
 424	struct ice_hw *hw;
 425
 426	dev = ice_pf_to_dev(pf);
 427	hw = &pf->hw;
 428	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 429
 430	/* Inform VF that it is no longer active, as a warning */
 431	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 432
 433	/* Disable VF's configuration API during reset. The flag is re-enabled
 434	 * when it's safe again to access VF's VSI.
 435	 */
 436	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 437
 438	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
 439	 * in the case of VFR. If this is done for PFR, it can mess up VF
 440	 * resets because the VF driver may already have started cleanup
 441	 * by the time we get here.
 442	 */
 443	if (!is_pfr)
 444		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
 445
 446	/* In the case of a VFLR, the HW has already reset the VF and we
 447	 * just need to clean up, so don't hit the VFRTRIG register.
 448	 */
 449	if (!is_vflr) {
 450		/* reset VF using VPGEN_VFRTRIG reg */
 451		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
 452		reg |= VPGEN_VFRTRIG_VFSWR_M;
 453		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
 454	}
 455	/* clear the VFLR bit in GLGEN_VFLRSTAT */
 456	reg_idx = (vf_abs_id) / 32;
 457	bit_idx = (vf_abs_id) % 32;
 458	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 459	ice_flush(hw);
 460
 461	wr32(hw, PF_PCI_CIAA,
 462	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
 463	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
 464		reg = rd32(hw, PF_PCI_CIAD);
 465		/* no transactions pending so stop polling */
 466		if ((reg & VF_TRANS_PENDING_M) == 0)
 467			break;
 468
 469		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
 470		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
 471	}
 472}
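/* Illustrative note on the VFLR acknowledgment math above: one bit per
 * VF is packed into 32-bit GLGEN_VFLRSTAT registers, so an absolute VF
 * ID of 40 yields reg_idx = 40 / 32 = 1 and bit_idx = 40 % 32 = 8,
 * i.e. the write becomes wr32(hw, GLGEN_VFLRSTAT(1), BIT(8)).
 */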
 473
 474/**
 475 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 476 * @vsi: the VSI to update
 477 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
  478 * @enable: true to enable the PVID, false to disable it
 479 */
 480static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
 481{
 482	struct ice_hw *hw = &vsi->back->hw;
 483	struct ice_aqc_vsi_props *info;
 484	struct ice_vsi_ctx *ctxt;
 485	enum ice_status status;
 486	int ret = 0;
 487
 488	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
 489	if (!ctxt)
 490		return -ENOMEM;
 491
 492	ctxt->info = vsi->info;
 493	info = &ctxt->info;
 494	if (enable) {
 495		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
 496			ICE_AQ_VSI_PVLAN_INSERT_PVID |
 497			ICE_AQ_VSI_VLAN_EMOD_STR;
 498		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 499	} else {
 500		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
 501			ICE_AQ_VSI_VLAN_MODE_ALL;
 502		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 503	}
 504
 505	info->pvid = cpu_to_le16(pvid_info);
 506	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
 507					   ICE_AQ_VSI_PROP_SW_VALID);
 508
 509	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
 510	if (status) {
 511		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
 512			 ice_stat_str(status),
 513			 ice_aq_str(hw->adminq.sq_last_status));
 514		ret = -EIO;
 515		goto out;
 516	}
 517
 518	vsi->info.vlan_flags = info->vlan_flags;
 519	vsi->info.sw_flags2 = info->sw_flags2;
 520	vsi->info.pvid = info->pvid;
 521out:
 522	kfree(ctxt);
 523	return ret;
 524}
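/* Illustrative note: pvid_info packs the VLAN ID and QoS into one u16.
 * Assuming the standard 802.1Q TCI layout (VLAN ID in bits 0-11,
 * priority shifted by VLAN_PRIO_SHIFT == 13), VLAN 100 with priority 3
 * would be passed as (3 << 13) | 100 == 0x6064, e.g.:
 *
 *	ice_vsi_manage_pvid(vsi, (3 << VLAN_PRIO_SHIFT) | 100, true);
 */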
 525
 526/**
 527 * ice_vf_get_port_info - Get the VF's port info structure
 528 * @vf: VF used to get the port info structure for
 529 */
 530static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
 531{
 532	return vf->pf->hw.port_info;
 533}
 534
 535/**
 536 * ice_vf_vsi_setup - Set up a VF VSI
 537 * @vf: VF to setup VSI for
 538 *
 539 * Returns pointer to the successfully allocated VSI struct on success,
 540 * otherwise returns NULL on failure.
 541 */
 542static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
 543{
 544	struct ice_port_info *pi = ice_vf_get_port_info(vf);
 545	struct ice_pf *pf = vf->pf;
 546	struct ice_vsi *vsi;
 547
 548	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
 549
 550	if (!vsi) {
 551		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
 552		ice_vf_invalidate_vsi(vf);
 553		return NULL;
 554	}
 555
 556	vf->lan_vsi_idx = vsi->idx;
 557	vf->lan_vsi_num = vsi->vsi_num;
 558
 559	return vsi;
 560}
 561
 562/**
 563 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 564 * @pf: pointer to PF structure
 565 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 566 *
 567 * This returns the first MSIX vector index in PF space that is used by this VF.
 568 * This index is used when accessing PF relative registers such as
 569 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 570 * This will always be the OICR index in the AVF driver so any functionality
 571 * using vf->first_vector_idx for queue configuration will have to increment by
 572 * 1 to avoid meddling with the OICR index.
 573 */
 574static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
 575{
 576	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
 577}
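/* Worked example with assumed values: if pf->sriov_base_vector == 480
 * and pf->num_msix_per_vf == 17, the VF with vf_id == 2 starts at
 * PF-space vector 480 + 2 * 17 = 514, which is also that VF's OICR
 * vector; its queue vectors then occupy 515..530.
 */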
 578
 579/**
 580 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
  581 * @vf: VF to add VLAN filters for
 582 *
 583 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 584 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 585 */
 586static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
 587{
 588	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 589	struct device *dev = ice_pf_to_dev(vf->pf);
 590	u16 vlan_id = 0;
 591	int err;
 592
 593	if (vf->port_vlan_info) {
 594		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
 595		if (err) {
 596			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
 597				vf->vf_id, err);
 598			return err;
 599		}
 600
 601		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
 602	}
 603
 604	/* vlan_id will either be 0 or the port VLAN number */
 605	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
 606	if (err) {
 607		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
 608			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
 609			err);
 610		return err;
 611	}
 612
 613	return 0;
 614}
 615
 616/**
 617 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 618 * @vf: VF to add MAC filters for
 619 *
 620 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 621 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 622 */
 623static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
 624{
 625	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 626	struct device *dev = ice_pf_to_dev(vf->pf);
 627	enum ice_status status;
 628	u8 broadcast[ETH_ALEN];
 629
 630	eth_broadcast_addr(broadcast);
 631	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
 632	if (status) {
 633		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
 634			vf->vf_id, ice_stat_str(status));
 635		return ice_status_to_errno(status);
 636	}
 637
 638	vf->num_mac++;
 639
 640	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
 641		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
 642					  ICE_FWD_TO_VSI);
 643		if (status) {
 644			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
 645				&vf->dflt_lan_addr.addr[0], vf->vf_id,
 646				ice_stat_str(status));
 647			return ice_status_to_errno(status);
 648		}
 649		vf->num_mac++;
 650	}
 651
 652	return 0;
 653}
 654
 655/**
 656 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 657 * @vf: VF to configure trust setting for
 658 */
 659static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
 660{
 661	if (vf->trusted)
 662		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 663	else
 664		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 665}
 666
 667/**
 668 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 669 * @vf: VF to enable MSIX mappings for
 670 *
 671 * Some of the registers need to be indexed/configured using hardware global
 672 * device values and other registers need 0-based values, which represent PF
 673 * based values.
 674 */
 675static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
 676{
 677	int device_based_first_msix, device_based_last_msix;
 678	int pf_based_first_msix, pf_based_last_msix, v;
 679	struct ice_pf *pf = vf->pf;
 680	int device_based_vf_id;
 681	struct ice_hw *hw;
 682	u32 reg;
 683
 684	hw = &pf->hw;
 685	pf_based_first_msix = vf->first_vector_idx;
 686	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
 687
 688	device_based_first_msix = pf_based_first_msix +
 689		pf->hw.func_caps.common_cap.msix_vector_first_id;
 690	device_based_last_msix =
 691		(device_based_first_msix + pf->num_msix_per_vf) - 1;
 692	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 693
 694	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
 695		VPINT_ALLOC_FIRST_M) |
 696	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
 697		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
 698	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
 699
 700	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
 701		 & VPINT_ALLOC_PCI_FIRST_M) |
 702	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
 703		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
 704	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
 705
 706	/* map the interrupts to its functions */
 707	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
 708		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
 709			GLINT_VECT2FUNC_VF_NUM_M) |
 710		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 711			GLINT_VECT2FUNC_PF_NUM_M));
 712		wr32(hw, GLINT_VECT2FUNC(v), reg);
 713	}
 714
 715	/* Map mailbox interrupt to VF MSI-X vector 0 */
 716	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
 717}
 718
 719/**
 720 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 721 * @vf: VF to enable the mappings for
 722 * @max_txq: max Tx queues allowed on the VF's VSI
 723 * @max_rxq: max Rx queues allowed on the VF's VSI
 724 */
 725static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
 726{
 727	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 728	struct device *dev = ice_pf_to_dev(vf->pf);
 729	struct ice_hw *hw = &vf->pf->hw;
 730	u32 reg;
 731
 732	/* set regardless of mapping mode */
 733	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
 734
 735	/* VF Tx queues allocation */
 736	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
 737		/* set the VF PF Tx queue range
 738		 * VFNUMQ value should be set to (number of queues - 1). A value
 739		 * of 0 means 1 queue and a value of 255 means 256 queues
 740		 */
 741		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
 742			VPLAN_TX_QBASE_VFFIRSTQ_M) |
 743		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
 744			VPLAN_TX_QBASE_VFNUMQ_M));
 745		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
 746	} else {
 747		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
 748	}
 749
 750	/* set regardless of mapping mode */
 751	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
 752
 753	/* VF Rx queues allocation */
 754	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
 755		/* set the VF PF Rx queue range
 756		 * VFNUMQ value should be set to (number of queues - 1). A value
 757		 * of 0 means 1 queue and a value of 255 means 256 queues
 758		 */
 759		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
 760			VPLAN_RX_QBASE_VFFIRSTQ_M) |
 761		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
 762			VPLAN_RX_QBASE_VFNUMQ_M));
 763		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
 764	} else {
 765		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
 766	}
 767}
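/* Illustrative sketch of the contiguous encoding above: for a VF VSI
 * whose first PF Tx queue is 64 with max_txq == 16, VFFIRSTQ is written
 * as 64 and VFNUMQ as 16 - 1 = 15, since the field holds "number of
 * queues minus one" (0 means 1 queue, 255 means 256 queues).
 */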
 768
 769/**
 770 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 771 * @vf: pointer to the VF structure
 772 */
 773static void ice_ena_vf_mappings(struct ice_vf *vf)
 774{
 775	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
 776
 777	ice_ena_vf_msix_mappings(vf);
 778	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
 779}
 780
 781/**
 782 * ice_determine_res
 783 * @pf: pointer to the PF structure
 784 * @avail_res: available resources in the PF structure
 785 * @max_res: maximum resources that can be given per VF
 786 * @min_res: minimum resources that can be given per VF
 787 *
 788 * Returns non-zero value if resources (queues/vectors) are available or
 789 * returns zero if PF cannot accommodate for all num_alloc_vfs.
 790 */
 791static int
 792ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
 793{
 794	bool checked_min_res = false;
 795	int res;
 796
  797	/* Start by checking if the PF can assign the max number of resources
  798	 * for all num_alloc_vfs.
  799	 * If yes, return that number per VF.
  800	 * If no, divide by 2 and round up, then check again.
  801	 * Repeat the loop until even the minimum resources are not available;
  802	 * in that case return 0.
  803	 */
 804	res = max_res;
 805	while ((res >= min_res) && !checked_min_res) {
 806		int num_all_res;
 807
 808		num_all_res = pf->num_alloc_vfs * res;
 809		if (num_all_res <= avail_res)
 810			return res;
 811
 812		if (res == min_res)
 813			checked_min_res = true;
 814
 815		res = DIV_ROUND_UP(res, 2);
 816	}
 817	return 0;
 818}
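/* Worked example with assumed values: for num_alloc_vfs == 4,
 * avail_res == 40, max_res == 16 and min_res == 1, the first pass
 * fails (4 * 16 = 64 > 40), res becomes DIV_ROUND_UP(16, 2) == 8, and
 * the second pass succeeds (4 * 8 = 32 <= 40), so 8 is returned.
 */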
 819
 820/**
 821 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 822 * @vf: VF to calculate the register index for
 823 * @q_vector: a q_vector associated to the VF
 824 */
 825int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
 826{
 827	struct ice_pf *pf;
 828
 829	if (!vf || !q_vector)
 830		return -EINVAL;
 831
 832	pf = vf->pf;
 833
 834	/* always add one to account for the OICR being the first MSIX */
 835	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
 836		q_vector->v_idx + 1;
 837}
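/* Worked example with the same assumed values as above: for vf_id == 2,
 * sriov_base_vector == 480 and num_msix_per_vf == 17, the q_vector with
 * v_idx == 0 maps to 480 + 17 * 2 + 0 + 1 = 515, one past the VF's
 * OICR vector at 514.
 */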
 838
 839/**
 840 * ice_get_max_valid_res_idx - Get the max valid resource index
 841 * @res: pointer to the resource to find the max valid index for
 842 *
 843 * Start from the end of the ice_res_tracker and return right when we find the
 844 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 845 * valid for SR-IOV because it is the only consumer that manipulates the
 846 * res->end and this is always called when res->end is set to res->num_entries.
 847 */
 848static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
 849{
 850	int i;
 851
 852	if (!res)
 853		return -EINVAL;
 854
 855	for (i = res->num_entries - 1; i >= 0; i--)
 856		if (res->list[i] & ICE_RES_VALID_BIT)
 857			return i;
 858
 859	return 0;
 860}
 861
 862/**
 863 * ice_sriov_set_msix_res - Set any used MSIX resources
 864 * @pf: pointer to PF structure
 865 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 866 *
 867 * This function allows SR-IOV resources to be taken from the end of the PF's
 868 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 869 * just set the pf->sriov_base_vector and return success.
 870 *
 871 * If there are not enough resources available, return an error. This should
 872 * always be caught by ice_set_per_vf_res().
 873 *
  874 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
  875 * in the PF's space available for SR-IOV.
 876 */
 877static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
 878{
 879	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
 880	int vectors_used = pf->irq_tracker->num_entries;
 881	int sriov_base_vector;
 882
 883	sriov_base_vector = total_vectors - num_msix_needed;
 884
 885	/* make sure we only grab irq_tracker entries from the list end and
 886	 * that we have enough available MSIX vectors
 887	 */
 888	if (sriov_base_vector < vectors_used)
 889		return -EINVAL;
 890
 891	pf->sriov_base_vector = sriov_base_vector;
 892
 893	return 0;
 894}
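/* Worked example with assumed values: with 512 total MSIX vectors, 96
 * irq_tracker entries in use, and 8 VFs at 17 vectors each
 * (num_msix_needed == 136), sriov_base_vector = 512 - 136 = 376, which
 * is >= 96, so the SR-IOV block fits at the end of the vector space.
 */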
 895
 896/**
 897 * ice_set_per_vf_res - check if vectors and queues are available
 898 * @pf: pointer to the PF structure
 899 *
 900 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 901 * get more vectors and can enable more queues per VF. Note that this does not
 902 * grab any vectors from the SW pool already allocated. Also note, that all
 903 * vector counts include one for each VF's miscellaneous interrupt vector
 904 * (i.e. OICR).
 905 *
 906 * Minimum VFs - 2 vectors, 1 queue pair
 907 * Small VFs - 5 vectors, 4 queue pairs
 908 * Medium VFs - 17 vectors, 16 queue pairs
 909 *
 910 * Second, determine number of queue pairs per VF by starting with a pre-defined
 911 * maximum each VF supports. If this is not possible, then we adjust based on
 912 * queue pairs available on the device.
 913 *
 914 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 915 * by each VF during VF initialization and reset.
 916 */
 917static int ice_set_per_vf_res(struct ice_pf *pf)
 918{
 919	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
 920	int msix_avail_per_vf, msix_avail_for_sriov;
 921	struct device *dev = ice_pf_to_dev(pf);
 922	u16 num_msix_per_vf, num_txq, num_rxq;
 923
 924	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
 925		return -EINVAL;
 926
 927	/* determine MSI-X resources per VF */
 928	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
 929		pf->irq_tracker->num_entries;
 930	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
 931	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
 932		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
 933	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
 934		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
 935	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
 936		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
 937	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
 938		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
 939	} else {
 940		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
 941			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
 942			pf->num_alloc_vfs);
 943		return -EIO;
 944	}
 945
 946	/* determine queue resources per VF */
 947	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
 948				    min_t(u16,
 949					  num_msix_per_vf - ICE_NONQ_VECS_VF,
 950					  ICE_MAX_RSS_QS_PER_VF),
 951				    ICE_MIN_QS_PER_VF);
 952
 953	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
 954				    min_t(u16,
 955					  num_msix_per_vf - ICE_NONQ_VECS_VF,
 956					  ICE_MAX_RSS_QS_PER_VF),
 957				    ICE_MIN_QS_PER_VF);
 958
 959	if (!num_txq || !num_rxq) {
 960		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
 961			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
 962		return -EIO;
 963	}
 964
 965	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
 966		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
 967			pf->num_alloc_vfs);
 968		return -EINVAL;
 969	}
 970
 971	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
 972	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
 973	pf->num_msix_per_vf = num_msix_per_vf;
 974	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
 975		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
 976
 977	return 0;
 978}
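/* Worked example of the tier selection above, with assumed values:
 * given 48 MSI-X vectors available for SR-IOV and 8 requested VFs,
 * msix_avail_per_vf = 48 / 8 = 6, which is below the medium tier (17)
 * but at least the small tier (5), so each VF gets 5 vectors and, per
 * the sizing comment above, 4 queue pairs.
 */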
 979
 980/**
 981 * ice_clear_vf_reset_trigger - enable VF to access hardware
  982 * @vf: VF to enable hardware access for
 983 */
 984static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
 985{
 986	struct ice_hw *hw = &vf->pf->hw;
 987	u32 reg;
 988
 989	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
 990	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
 991	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
 992	ice_flush(hw);
 993}
 994
 995/**
 996 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 997 * @vf: pointer to the VF info
 998 * @vsi: the VSI being configured
 999 * @promisc_m: mask of promiscuous config bits
1000 * @rm_promisc: promisc flag request from the VF to remove or add filter
1001 *
1002 * This function configures VF VSI promiscuous mode, based on the VF requests,
1003 * for Unicast, Multicast and VLAN
1004 */
1005static enum ice_status
1006ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1007		       bool rm_promisc)
1008{
1009	struct ice_pf *pf = vf->pf;
1010	enum ice_status status = 0;
1011	struct ice_hw *hw;
1012
1013	hw = &pf->hw;
1014	if (vsi->num_vlan) {
1015		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1016						  rm_promisc);
1017	} else if (vf->port_vlan_info) {
1018		if (rm_promisc)
1019			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1020						       vf->port_vlan_info);
1021		else
1022			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1023						     vf->port_vlan_info);
1024	} else {
1025		if (rm_promisc)
1026			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1027						       0);
1028		else
1029			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1030						     0);
1031	}
1032
1033	return status;
1034}
1035
1036static void ice_vf_clear_counters(struct ice_vf *vf)
1037{
1038	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
1039
1040	vf->num_mac = 0;
1041	vsi->num_vlan = 0;
1042	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1043	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1044}
1045
1046/**
1047 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
1048 * @vf: VF to perform pre VSI rebuild tasks
1049 *
1050 * These tasks are items that don't need to be amortized since they are most
1051 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
1052 */
1053static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1054{
1055	ice_vf_clear_counters(vf);
1056	ice_clear_vf_reset_trigger(vf);
1057}
1058
1059/**
 1060 * ice_vf_rebuild_host_cfg - rebuild host admin configuration, which persists across reset
1061 * @vf: VF to rebuild host configuration on
1062 */
1063static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1064{
1065	struct device *dev = ice_pf_to_dev(vf->pf);
1066
1067	ice_vf_set_host_trust_cfg(vf);
1068
1069	if (ice_vf_rebuild_host_mac_cfg(vf))
1070		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1071			vf->vf_id);
1072
1073	if (ice_vf_rebuild_host_vlan_cfg(vf))
1074		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1075			vf->vf_id);
1076}
1077
1078/**
1079 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
1080 * @vf: VF to release and setup the VSI for
1081 *
1082 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
1083 * configuration change, etc.).
1084 */
1085static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1086{
1087	ice_vf_vsi_release(vf);
1088	if (!ice_vf_vsi_setup(vf))
1089		return -ENOMEM;
1090
1091	return 0;
1092}
1093
1094/**
1095 * ice_vf_rebuild_vsi - rebuild the VF's VSI
1096 * @vf: VF to rebuild the VSI for
1097 *
1098 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
1099 * host, PFR, CORER, etc.).
1100 */
1101static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1102{
1103	struct ice_pf *pf = vf->pf;
1104	struct ice_vsi *vsi;
1105
1106	vsi = pf->vsi[vf->lan_vsi_idx];
1107
1108	if (ice_vsi_rebuild(vsi, true)) {
1109		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1110			vf->vf_id);
1111		return -EIO;
1112	}
1113	/* vsi->idx will remain the same in this case so don't update
1114	 * vf->lan_vsi_idx
1115	 */
1116	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1117	vf->lan_vsi_num = vsi->vsi_num;
1118
1119	return 0;
1120}
1121
1122/**
1123 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1124 * @vf: VF to set in initialized state
1125 *
1126 * After this function the VF will be ready to receive/handle the
1127 * VIRTCHNL_OP_GET_VF_RESOURCES message
1128 */
1129static void ice_vf_set_initialized(struct ice_vf *vf)
1130{
1131	ice_set_vf_state_qs_dis(vf);
1132	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1133	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1134	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1135	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1136}
1137
1138/**
 1139 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
1140 * @vf: VF to perform tasks on
1141 */
1142static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1143{
1144	struct ice_pf *pf = vf->pf;
1145	struct ice_hw *hw;
1146
1147	hw = &pf->hw;
1148
1149	ice_vf_rebuild_host_cfg(vf);
1150
1151	ice_vf_set_initialized(vf);
1152	ice_ena_vf_mappings(vf);
1153	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1154}
1155
1156/**
1157 * ice_reset_all_vfs - reset all allocated VFs in one go
1158 * @pf: pointer to the PF structure
1159 * @is_vflr: true if VFLR was issued, false if not
1160 *
1161 * First, tell the hardware to reset each VF, then do all the waiting in one
1162 * chunk, and finally finish restoring each VF after the wait. This is useful
1163 * during PF routines which need to reset all VFs, as otherwise it must perform
1164 * these resets in a serialized fashion.
1165 *
1166 * Returns true if any VFs were reset, and false otherwise.
1167 */
1168bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1169{
1170	struct device *dev = ice_pf_to_dev(pf);
1171	struct ice_hw *hw = &pf->hw;
1172	struct ice_vf *vf;
1173	int v, i;
1174
1175	/* If we don't have any VFs, then there is nothing to reset */
1176	if (!pf->num_alloc_vfs)
1177		return false;
1178
1179	/* If VFs have been disabled, there is no need to reset */
1180	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1181		return false;
1182
1183	/* Begin reset on all VFs at once */
1184	ice_for_each_vf(pf, v)
1185		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1186
1187	/* HW requires some time to make sure it can flush the FIFO for a VF
1188	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1189	 * sequence to make sure that it has completed. We'll keep track of
1190	 * the VFs using a simple iterator that increments once that VF has
1191	 * finished resetting.
1192	 */
1193	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1194		/* Check each VF in sequence */
1195		while (v < pf->num_alloc_vfs) {
1196			u32 reg;
1197
1198			vf = &pf->vf[v];
1199			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1200			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1201				/* only delay if the check failed */
1202				usleep_range(10, 20);
1203				break;
1204			}
1205
1206			/* If the current VF has finished resetting, move on
1207			 * to the next VF in sequence.
1208			 */
1209			v++;
1210		}
1211	}
1212
1213	/* Display a warning if at least one VF didn't manage to reset in
1214	 * time, but continue on with the operation.
1215	 */
1216	if (v < pf->num_alloc_vfs)
1217		dev_warn(dev, "VF reset check timeout\n");
1218
1219	/* free VF resources to begin resetting the VSI state */
1220	ice_for_each_vf(pf, v) {
1221		vf = &pf->vf[v];
1222
1223		ice_vf_pre_vsi_rebuild(vf);
1224		ice_vf_rebuild_vsi(vf);
1225		ice_vf_post_vsi_rebuild(vf);
1226	}
1227
1228	ice_flush(hw);
1229	clear_bit(__ICE_VF_DIS, pf->state);
1230
1231	return true;
1232}
1233
1234/**
1235 * ice_is_vf_disabled
1236 * @vf: pointer to the VF info
1237 *
1238 * Returns true if the PF or VF is disabled, false otherwise.
1239 */
1240static bool ice_is_vf_disabled(struct ice_vf *vf)
1241{
1242	struct ice_pf *pf = vf->pf;
1243
 1244	/* If the PF has been disabled, there is no need to reset the VF until
 1245	 * the PF is active again. Similarly, if the VF has been disabled, this
1246	 * means something else is resetting the VF, so we shouldn't continue.
1247	 * Otherwise, set disable VF state bit for actual reset, and continue.
1248	 */
1249	return (test_bit(__ICE_VF_DIS, pf->state) ||
1250		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1251}
1252
1253/**
1254 * ice_reset_vf - Reset a particular VF
1255 * @vf: pointer to the VF structure
1256 * @is_vflr: true if VFLR was issued, false if not
1257 *
1258 * Returns true if the VF is currently in reset, resets successfully, or resets
1259 * are disabled and false otherwise.
1260 */
1261bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1262{
1263	struct ice_pf *pf = vf->pf;
1264	struct ice_vsi *vsi;
1265	struct device *dev;
1266	struct ice_hw *hw;
1267	bool rsd = false;
1268	u8 promisc_m;
1269	u32 reg;
1270	int i;
1271
1272	dev = ice_pf_to_dev(pf);
1273
1274	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
1275		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1276			vf->vf_id);
1277		return true;
1278	}
1279
1280	if (ice_is_vf_disabled(vf)) {
 1281		dev_dbg(dev, "VF %d is already disabled, there is no need to reset it; telling VM all is fine\n",
1282			vf->vf_id);
1283		return true;
1284	}
1285
1286	/* Set VF disable bit state here, before triggering reset */
1287	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1288	ice_trigger_vf_reset(vf, is_vflr, false);
1289
1290	vsi = pf->vsi[vf->lan_vsi_idx];
1291
1292	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1293		ice_dis_vf_qs(vf);
1294
1295	/* Call Disable LAN Tx queue AQ whether or not queues are
1296	 * enabled. This is needed for successful completion of VFR.
1297	 */
1298	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1299			NULL, ICE_VF_RESET, vf->vf_id, NULL);
1300
1301	hw = &pf->hw;
1302	/* poll VPGEN_VFRSTAT reg to make sure
1303	 * that reset is complete
1304	 */
1305	for (i = 0; i < 10; i++) {
1306		/* VF reset requires driver to first reset the VF and then
1307		 * poll the status register to make sure that the reset
1308		 * completed successfully.
1309		 */
1310		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1311		if (reg & VPGEN_VFRSTAT_VFRD_M) {
1312			rsd = true;
1313			break;
1314		}
1315
1316		/* only sleep if the reset is not done */
1317		usleep_range(10, 20);
1318	}
1319
 1320	/* Display a warning if the VF didn't manage to reset in time, but we
 1321	 * need to continue on with the operation.
1322	 */
1323	if (!rsd)
1324		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1325
1326	/* disable promiscuous modes in case they were enabled
1327	 * ignore any error if disabling process failed
1328	 */
1329	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1330	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1331		if (vf->port_vlan_info || vsi->num_vlan)
1332			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1333		else
1334			promisc_m = ICE_UCAST_PROMISC_BITS;
1335
1336		vsi = pf->vsi[vf->lan_vsi_idx];
1337		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1338			dev_err(dev, "disabling promiscuous mode failed\n");
1339	}
1340
1341	ice_vf_pre_vsi_rebuild(vf);
1342	ice_vf_rebuild_vsi_with_release(vf);
1343	ice_vf_post_vsi_rebuild(vf);
1344
1345	return true;
1346}
1347
1348/**
1349 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1350 * @pf: pointer to the PF structure
1351 */
1352void ice_vc_notify_link_state(struct ice_pf *pf)
1353{
1354	int i;
1355
1356	ice_for_each_vf(pf, i)
1357		ice_vc_notify_vf_link_state(&pf->vf[i]);
1358}
1359
1360/**
1361 * ice_vc_notify_reset - Send pending reset message to all VFs
1362 * @pf: pointer to the PF structure
1363 *
1364 * indicate a pending reset to all VFs on a given PF
1365 */
1366void ice_vc_notify_reset(struct ice_pf *pf)
1367{
1368	struct virtchnl_pf_event pfe;
1369
1370	if (!pf->num_alloc_vfs)
1371		return;
1372
1373	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1374	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1375	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1376			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1377}
1378
1379/**
1380 * ice_vc_notify_vf_reset - Notify VF of a reset event
1381 * @vf: pointer to the VF structure
1382 */
1383static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1384{
1385	struct virtchnl_pf_event pfe;
1386	struct ice_pf *pf;
1387
1388	if (!vf)
1389		return;
1390
1391	pf = vf->pf;
1392	if (ice_validate_vf_id(pf, vf->vf_id))
1393		return;
1394
1395	/* Bail out if VF is in disabled state, neither initialized, nor active
1396	 * state - otherwise proceed with notifications
1397	 */
1398	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1399	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1400	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1401		return;
1402
1403	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1404	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1405	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1406			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1407			      NULL);
1408}
1409
1410/**
1411 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1412 * @vf: VF to initialize/setup the VSI for
1413 *
 1414 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 1415 * the VF VSI's broadcast filter. It is only used during initial VF creation.
1416 */
1417static int ice_init_vf_vsi_res(struct ice_vf *vf)
1418{
1419	struct ice_pf *pf = vf->pf;
1420	u8 broadcast[ETH_ALEN];
1421	enum ice_status status;
1422	struct ice_vsi *vsi;
1423	struct device *dev;
1424	int err;
1425
1426	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1427
1428	dev = ice_pf_to_dev(pf);
1429	vsi = ice_vf_vsi_setup(vf);
1430	if (!vsi)
1431		return -ENOMEM;
1432
1433	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1434	if (err) {
1435		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1436			 vf->vf_id);
1437		goto release_vsi;
1438	}
1439
1440	eth_broadcast_addr(broadcast);
1441	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1442	if (status) {
1443		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1444			vf->vf_id, ice_stat_str(status));
1445		err = ice_status_to_errno(status);
1446		goto release_vsi;
1447	}
1448
1449	vf->num_mac = 1;
1450
1451	return 0;
1452
1453release_vsi:
1454	ice_vf_vsi_release(vf);
1455	return err;
1456}
1457
1458/**
1459 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1460 * @pf: PF the VFs are associated with
1461 */
1462static int ice_start_vfs(struct ice_pf *pf)
1463{
1464	struct ice_hw *hw = &pf->hw;
1465	int retval, i;
1466
1467	ice_for_each_vf(pf, i) {
1468		struct ice_vf *vf = &pf->vf[i];
1469
1470		ice_clear_vf_reset_trigger(vf);
1471
1472		retval = ice_init_vf_vsi_res(vf);
1473		if (retval) {
1474			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1475				vf->vf_id, retval);
1476			goto teardown;
1477		}
1478
1479		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1480		ice_ena_vf_mappings(vf);
1481		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1482	}
1483
1484	ice_flush(hw);
1485	return 0;
1486
1487teardown:
1488	for (i = i - 1; i >= 0; i--) {
1489		struct ice_vf *vf = &pf->vf[i];
1490
1491		ice_dis_vf_mappings(vf);
1492		ice_vf_vsi_release(vf);
1493	}
1494
1495	return retval;
1496}
1497
1498/**
 1499 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1500 * @pf: PF holding reference to all VFs for default configuration
1501 */
1502static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1503{
1504	int i;
1505
1506	ice_for_each_vf(pf, i) {
1507		struct ice_vf *vf = &pf->vf[i];
1508
1509		vf->pf = pf;
1510		vf->vf_id = i;
1511		vf->vf_sw_id = pf->first_sw;
1512		/* assign default capabilities */
1513		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1514		vf->spoofchk = true;
1515		vf->num_vf_qs = pf->num_qps_per_vf;
1516	}
1517}
1518
1519/**
1520 * ice_alloc_vfs - allocate num_vfs in the PF structure
1521 * @pf: PF to store the allocated VFs in
1522 * @num_vfs: number of VFs to allocate
1523 */
1524static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1525{
1526	struct ice_vf *vfs;
1527
1528	vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1529			   GFP_KERNEL);
1530	if (!vfs)
1531		return -ENOMEM;
1532
1533	pf->vf = vfs;
1534	pf->num_alloc_vfs = num_vfs;
1535
1536	return 0;
1537}
1538
1539/**
1540 * ice_ena_vfs - enable VFs so they are ready to be used
1541 * @pf: pointer to the PF structure
1542 * @num_vfs: number of VFs to enable
1543 */
1544static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1545{
1546	struct device *dev = ice_pf_to_dev(pf);
1547	struct ice_hw *hw = &pf->hw;
1548	int ret;
1549
1550	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
1551	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1552	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1553	set_bit(__ICE_OICR_INTR_DIS, pf->state);
1554	ice_flush(hw);
1555
1556	ret = pci_enable_sriov(pf->pdev, num_vfs);
1557	if (ret) {
1558		pf->num_alloc_vfs = 0;
1559		goto err_unroll_intr;
1560	}
1561
1562	ret = ice_alloc_vfs(pf, num_vfs);
1563	if (ret)
1564		goto err_pci_disable_sriov;
1565
1566	if (ice_set_per_vf_res(pf)) {
 1567		dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
1568			num_vfs);
1569		ret = -ENOSPC;
1570		goto err_unroll_sriov;
1571	}
1572
1573	ice_set_dflt_settings_vfs(pf);
1574
1575	if (ice_start_vfs(pf)) {
1576		dev_err(dev, "Failed to start VF(s)\n");
1577		ret = -EAGAIN;
1578		goto err_unroll_sriov;
1579	}
1580
1581	clear_bit(__ICE_VF_DIS, pf->state);
1582	return 0;
1583
1584err_unroll_sriov:
1585	devm_kfree(dev, pf->vf);
1586	pf->vf = NULL;
1587	pf->num_alloc_vfs = 0;
1588err_pci_disable_sriov:
1589	pci_disable_sriov(pf->pdev);
1590err_unroll_intr:
1591	/* rearm interrupts here */
1592	ice_irq_dynamic_ena(hw, NULL, NULL);
1593	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1594	return ret;
1595}
1596
1597/**
1598 * ice_pci_sriov_ena - Enable or change number of VFs
1599 * @pf: pointer to the PF structure
1600 * @num_vfs: number of VFs to allocate
1601 *
1602 * Returns 0 on success and negative on failure
1603 */
1604static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1605{
1606	int pre_existing_vfs = pci_num_vf(pf->pdev);
1607	struct device *dev = ice_pf_to_dev(pf);
1608	int err;
1609
1610	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1611		ice_free_vfs(pf);
1612	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1613		return 0;
1614
1615	if (num_vfs > pf->num_vfs_supported) {
1616		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1617			num_vfs, pf->num_vfs_supported);
1618		return -EOPNOTSUPP;
1619	}
1620
1621	dev_info(dev, "Enabling %d VFs\n", num_vfs);
1622	err = ice_ena_vfs(pf, num_vfs);
1623	if (err) {
1624		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1625		return err;
1626	}
1627
1628	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1629	return 0;
1630}
1631
1632/**
1633 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 1634 * @pf: PF to enable SR-IOV on
1635 */
1636static int ice_check_sriov_allowed(struct ice_pf *pf)
1637{
1638	struct device *dev = ice_pf_to_dev(pf);
1639
1640	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1641		dev_err(dev, "This device is not capable of SR-IOV\n");
1642		return -EOPNOTSUPP;
1643	}
1644
1645	if (ice_is_safe_mode(pf)) {
1646		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1647		return -EOPNOTSUPP;
1648	}
1649
1650	if (!ice_pf_state_is_nominal(pf)) {
1651		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1652		return -EBUSY;
1653	}
1654
1655	return 0;
1656}
1657
1658/**
1659 * ice_sriov_configure - Enable or change number of VFs via sysfs
1660 * @pdev: pointer to a pci_dev structure
1661 * @num_vfs: number of VFs to allocate or 0 to free VFs
1662 *
1663 * This function is called when the user updates the number of VFs in sysfs. On
1664 * success return whatever num_vfs was set to by the caller. Return negative on
1665 * failure.
1666 */
1667int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1668{
1669	struct ice_pf *pf = pci_get_drvdata(pdev);
1670	struct device *dev = ice_pf_to_dev(pf);
1671	int err;
1672
1673	err = ice_check_sriov_allowed(pf);
1674	if (err)
1675		return err;
1676
1677	if (!num_vfs) {
1678		if (!pci_vfs_assigned(pdev)) {
1679			ice_free_vfs(pf);
1680			return 0;
1681		}
1682
1683		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1684		return -EBUSY;
1685	}
1686
1687	err = ice_pci_sriov_ena(pf, num_vfs);
1688	if (err)
1689		return err;
1690
1691	return num_vfs;
1692}
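/* Usage note: this callback backs the standard PCI sysfs knob, so VFs
 * are typically created or destroyed from userspace with something
 * like:
 *
 *	echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *
 * where <BDF> is the PF's PCI address.
 */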
1693
1694/**
1695 * ice_process_vflr_event - Free VF resources via IRQ calls
1696 * @pf: pointer to the PF structure
1697 *
1698 * called from the VFLR IRQ handler to
1699 * free up VF resources and state variables
1700 */
1701void ice_process_vflr_event(struct ice_pf *pf)
1702{
1703	struct ice_hw *hw = &pf->hw;
1704	unsigned int vf_id;
1705	u32 reg;
1706
1707	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1708	    !pf->num_alloc_vfs)
1709		return;
1710
1711	ice_for_each_vf(pf, vf_id) {
1712		struct ice_vf *vf = &pf->vf[vf_id];
1713		u32 reg_idx, bit_idx;
1714
1715		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1716		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 1717		/* read GLGEN_VFLRSTAT register to find out which VFs triggered VFLR */
1718		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1719		if (reg & BIT(bit_idx))
1720			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1721			ice_reset_vf(vf, true);
1722	}
1723}
1724
1725/**
1726 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1727 * @vf: pointer to the VF info
1728 */
1729static void ice_vc_reset_vf(struct ice_vf *vf)
1730{
1731	ice_vc_notify_vf_reset(vf);
1732	ice_reset_vf(vf, false);
1733}
1734
1735/**
1736 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1737 * @pf: PF used to index all VFs
1738 * @pfq: queue index relative to the PF's function space
1739 *
1740 * If no VF is found who owns the pfq then return NULL, otherwise return a
1741 * pointer to the VF who owns the pfq
1742 */
1743static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1744{
1745	unsigned int vf_id;
1746
1747	ice_for_each_vf(pf, vf_id) {
1748		struct ice_vf *vf = &pf->vf[vf_id];
1749		struct ice_vsi *vsi;
1750		u16 rxq_idx;
1751
1752		vsi = pf->vsi[vf->lan_vsi_idx];
1753
1754		ice_for_each_rxq(vsi, rxq_idx)
1755			if (vsi->rxq_map[rxq_idx] == pfq)
1756				return vf;
1757	}
1758
1759	return NULL;
1760}
1761
1762/**
1763 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1764 * @pf: PF used for conversion
1765 * @globalq: global queue index used to convert to PF space queue index
1766 */
1767static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1768{
1769	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1770}
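/* Worked example with assumed values: if this PF's first device Rx
 * queue ID (rxq_first_id) is 64, global queue 72 converts to PF-space
 * queue 72 - 64 = 8, which ice_get_vf_from_pfq() then matches against
 * each VF VSI's rxq_map.
 */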
1771
1772/**
1773 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1774 * @pf: PF that the LAN overflow event happened on
1775 * @event: structure holding the event information for the LAN overflow event
1776 *
1777 * Determine if the LAN overflow event was caused by a VF queue. If it was not
1778 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1779 * reset on the offending VF.
1780 */
1781void
1782ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1783{
1784	u32 gldcb_rtctq, queue;
1785	struct ice_vf *vf;
1786
1787	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1788	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1789
1790	/* event returns device global Rx queue number */
1791	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1792		GLDCB_RTCTQ_RXQNUM_S;
1793
1794	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1795	if (!vf)
1796		return;
1797
1798	ice_vc_reset_vf(vf);
1799}
1800
1801/**
1802 * ice_vc_send_msg_to_vf - Send message to VF
1803 * @vf: pointer to the VF info
1804 * @v_opcode: virtual channel opcode
1805 * @v_retval: virtual channel return value
1806 * @msg: pointer to the msg buffer
1807 * @msglen: msg length
1808 *
1809 * send msg to VF
1810 */
1811static int
1812ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1813		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1814{
1815	enum ice_status aq_ret;
1816	struct device *dev;
1817	struct ice_pf *pf;
1818
1819	if (!vf)
1820		return -EINVAL;
1821
1822	pf = vf->pf;
1823	if (ice_validate_vf_id(pf, vf->vf_id))
1824		return -EINVAL;
1825
1826	dev = ice_pf_to_dev(pf);
1827
1828	/* single place to detect unsuccessful return values */
1829	if (v_retval) {
1830		vf->num_inval_msgs++;
1831		dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1832			 v_opcode, v_retval);
1833		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1834			dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1835				vf->vf_id);
1836			dev_err(dev, "Use PF Control I/F to enable the VF\n");
1837			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1838			return -EIO;
1839		}
1840	} else {
1841		vf->num_valid_msgs++;
1842		/* reset the invalid counter, if a valid message is received. */
1843		vf->num_inval_msgs = 0;
1844	}
1845
1846	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1847				       msg, msglen, NULL);
1848	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1849		dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1850			 vf->vf_id, ice_stat_str(aq_ret),
1851			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1852		return -EIO;
1853	}
1854
1855	return 0;
1856}
1857
1858/**
1859 * ice_vc_get_ver_msg
1860 * @vf: pointer to the VF info
1861 * @msg: pointer to the msg buffer
1862 *
1863 * called from the VF to request the API version used by the PF
1864 */
1865static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1866{
1867	struct virtchnl_version_info info = {
1868		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1869	};
1870
1871	vf->vf_ver = *(struct virtchnl_version_info *)msg;
1872	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1873	if (VF_IS_V10(&vf->vf_ver))
1874		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1875
1876	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1877				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1878				     sizeof(struct virtchnl_version_info));
1879}
1880
1881/**
1882 * ice_vc_get_vf_res_msg
1883 * @vf: pointer to the VF info
1884 * @msg: pointer to the msg buffer
1885 *
1886 * called from the VF to request its resources
1887 */
1888static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1889{
1890	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1891	struct virtchnl_vf_resource *vfres = NULL;
1892	struct ice_pf *pf = vf->pf;
1893	struct ice_vsi *vsi;
1894	int len = 0;
1895	int ret;
1896
1897	if (ice_check_vf_init(pf, vf)) {
1898		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1899		goto err;
1900	}
1901
1902	len = sizeof(struct virtchnl_vf_resource);
1903
1904	vfres = kzalloc(len, GFP_KERNEL);
1905	if (!vfres) {
1906		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1907		len = 0;
1908		goto err;
1909	}
1910	if (VF_IS_V11(&vf->vf_ver))
1911		vf->driver_caps = *(u32 *)msg;
1912	else
1913		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1914				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1915				  VIRTCHNL_VF_OFFLOAD_VLAN;
1916
1917	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1918	vsi = pf->vsi[vf->lan_vsi_idx];
1919	if (!vsi) {
1920		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1921		goto err;
1922	}
1923
1924	if (!vsi->info.pvid)
1925		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1926
1927	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1928		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1929	} else {
1930		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1931			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1932		else
1933			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1934	}
1935
1936	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1937		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1938
1939	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1940		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1941
1942	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1943		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1944
1945	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1946		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1947
1948	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1949		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1950
1951	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1952		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1953
1954	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1955		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1956
1957	vfres->num_vsis = 1;
1958	/* Tx and Rx queue are equal for VF */
1959	vfres->num_queue_pairs = vsi->num_txq;
1960	vfres->max_vectors = pf->num_msix_per_vf;
1961	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1962	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1963
1964	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1965	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1966	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1967	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1968			vf->dflt_lan_addr.addr);
1969
1970	/* match guest capabilities */
1971	vf->driver_caps = vfres->vf_cap_flags;
1972
1973	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1974
1975err:
1976	/* send the response back to the VF */
1977	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1978				    (u8 *)vfres, len);
1979
1980	kfree(vfres);
1981	return ret;
1982}
1983
1984/**
1985 * ice_vc_reset_vf_msg
1986 * @vf: pointer to the VF info
1987 *
 1988 * called from the VF to reset itself;
 1989 * unlike other virtchnl messages, the PF driver
 1990 * doesn't send a response back to the VF
1991 */
1992static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1993{
1994	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
1995		ice_reset_vf(vf, false);
1996}
1997
1998/**
1999 * ice_find_vsi_from_id
2000 * @pf: the PF structure to search for the VSI
2001 * @id: ID of the VSI it is searching for
2002 *
2003 * searches for the VSI with the given ID
2004 */
2005static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2006{
2007	int i;
2008
2009	ice_for_each_vsi(pf, i)
2010		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2011			return pf->vsi[i];
2012
2013	return NULL;
2014}
2015
2016/**
2017 * ice_vc_isvalid_vsi_id
2018 * @vf: pointer to the VF info
2019 * @vsi_id: VF relative VSI ID
2020 *
2021 * check for the valid VSI ID
2022 */
2023static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2024{
2025	struct ice_pf *pf = vf->pf;
2026	struct ice_vsi *vsi;
2027
2028	vsi = ice_find_vsi_from_id(pf, vsi_id);
2029
2030	return (vsi && (vsi->vf_id == vf->vf_id));
2031}
2032
2033/**
2034 * ice_vc_isvalid_q_id
2035 * @vf: pointer to the VF info
2036 * @vsi_id: VSI ID
2037 * @qid: VSI relative queue ID
2038 *
2039 * check for the valid queue ID
2040 */
2041static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2042{
2043	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
 2044	/* allocated Tx and Rx queues should always be equal for a VF VSI */
2045	return (vsi && (qid < vsi->alloc_txq));
2046}
2047
2048/**
2049 * ice_vc_isvalid_ring_len
2050 * @ring_len: length of ring
2051 *
2052 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
2053 * or zero
2054 */
2055static bool ice_vc_isvalid_ring_len(u16 ring_len)
2056{
2057	return ring_len == 0 ||
2058	       (ring_len >= ICE_MIN_NUM_DESC &&
2059		ring_len <= ICE_MAX_NUM_DESC &&
2060		!(ring_len % ICE_REQ_DESC_MULTIPLE));
2061}
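/* Editor's sketch, not driver code: the check above reduced to standalone C.
 * The bounds are assumptions matching this kernel's defaults of
 * ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160 and ICE_REQ_DESC_MULTIPLE =
 * 32, so 0, 64 and 512 descriptors pass, while 100 (not a multiple of 32)
 * and 8192 (above the maximum) fail.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ring_len_ok(uint16_t ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= 64 && ring_len <= 8160 && !(ring_len % 32));
}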
2062
2063/**
2064 * ice_vc_config_rss_key
2065 * @vf: pointer to the VF info
2066 * @msg: pointer to the msg buffer
2067 *
2068 * Configure the VF's RSS key
2069 */
2070static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2071{
2072	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2073	struct virtchnl_rss_key *vrk =
2074		(struct virtchnl_rss_key *)msg;
2075	struct ice_pf *pf = vf->pf;
2076	struct ice_vsi *vsi;
2077
2078	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2079		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2080		goto error_param;
2081	}
2082
2083	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2084		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2085		goto error_param;
2086	}
2087
2088	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2089		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2090		goto error_param;
2091	}
2092
2093	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2094		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2095		goto error_param;
2096	}
2097
2098	vsi = pf->vsi[vf->lan_vsi_idx];
2099	if (!vsi) {
2100		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2101		goto error_param;
2102	}
2103
2104	if (ice_set_rss(vsi, vrk->key, NULL, 0))
2105		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2106error_param:
2107	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2108				     NULL, 0);
2109}
2110
2111/**
2112 * ice_vc_config_rss_lut
2113 * @vf: pointer to the VF info
2114 * @msg: pointer to the msg buffer
2115 *
2116 * Configure the VF's RSS LUT
2117 */
2118static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2119{
2120	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2121	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2122	struct ice_pf *pf = vf->pf;
2123	struct ice_vsi *vsi;
2124
2125	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2126		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2127		goto error_param;
2128	}
2129
2130	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2131		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2132		goto error_param;
2133	}
2134
2135	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2136		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2137		goto error_param;
2138	}
2139
2140	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2141		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2142		goto error_param;
2143	}
2144
2145	vsi = pf->vsi[vf->lan_vsi_idx];
2146	if (!vsi) {
2147		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2148		goto error_param;
2149	}
2150
2151	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2152		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2153error_param:
2154	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2155				     NULL, 0);
2156}
2157
2158/**
2159 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2160 * @vf: the VF being reset
2161 *
2162 * The max poll time is about 800 ms, which is roughly the maximum time it
2163 * takes for a VF to be reset and/or a VF driver to be removed.
2164 */
2165static void ice_wait_on_vf_reset(struct ice_vf *vf)
2166{
2167	int i;
2168
2169	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2170		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2171			break;
2172		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2173	}
2174}
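/* Editor's note: the ~800 ms figure follows from the loop above, assuming
 * this kernel's values of ICE_MAX_VF_RESET_TRIES = 40 and
 * ICE_MAX_VF_RESET_SLEEP_MS = 20: 40 tries * 20 ms sleep = 800 ms worst case.
 */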
2175
2176/**
2177 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2178 * @vf: VF to check if it's ready to be configured/queried
2179 *
2180 * The purpose of this function is to make sure the VF is not in reset, not
2181 * disabled, and initialized so it can be configured and/or queried by a host
2182 * administrator.
2183 */
2184static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2185{
2186	struct ice_pf *pf;
2187
2188	ice_wait_on_vf_reset(vf);
2189
2190	if (ice_is_vf_disabled(vf))
2191		return -EINVAL;
2192
2193	pf = vf->pf;
2194	if (ice_check_vf_init(pf, vf))
2195		return -EBUSY;
2196
2197	return 0;
2198}
2199
2200/**
2201 * ice_set_vf_spoofchk
2202 * @netdev: network interface device structure
2203 * @vf_id: VF identifier
2204 * @ena: flag to enable or disable feature
2205 *
2206 * Enable or disable VF spoof checking
2207 */
2208int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2209{
2210	struct ice_netdev_priv *np = netdev_priv(netdev);
2211	struct ice_pf *pf = np->vsi->back;
2212	struct ice_vsi_ctx *ctx;
2213	struct ice_vsi *vf_vsi;
2214	enum ice_status status;
2215	struct device *dev;
2216	struct ice_vf *vf;
2217	int ret;
2218
2219	dev = ice_pf_to_dev(pf);
2220	if (ice_validate_vf_id(pf, vf_id))
2221		return -EINVAL;
2222
2223	vf = &pf->vf[vf_id];
2224	ret = ice_check_vf_ready_for_cfg(vf);
2225	if (ret)
2226		return ret;
2227
2228	vf_vsi = pf->vsi[vf->lan_vsi_idx];
2229	if (!vf_vsi) {
2230		netdev_err(netdev, "VSI %d for VF %d is null\n",
2231			   vf->lan_vsi_idx, vf->vf_id);
2232		return -EINVAL;
2233	}
2234
2235	if (vf_vsi->type != ICE_VSI_VF) {
2236		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
2237			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2238		return -ENODEV;
2239	}
2240
2241	if (ena == vf->spoofchk) {
2242		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2243		return 0;
2244	}
2245
2246	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2247	if (!ctx)
2248		return -ENOMEM;
2249
2250	ctx->info.sec_flags = vf_vsi->info.sec_flags;
2251	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2252	if (ena) {
2253		ctx->info.sec_flags |=
2254			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2255			(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2256			 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2257	} else {
2258		ctx->info.sec_flags &=
2259			~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2260			  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2261			   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2262	}
2263
2264	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2265	if (status) {
2266		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2267			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2268			ice_stat_str(status));
2269		ret = -EIO;
2270		goto out;
2271	}
2272
2273	/* only update spoofchk state and VSI context on success */
2274	vf_vsi->info.sec_flags = ctx->info.sec_flags;
2275	vf->spoofchk = ena;
2276
2277out:
2278	kfree(ctx);
2279	return ret;
2280}
2281
2282/**
2283 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2284 * @pf: PF structure for accessing VF(s)
2285 *
2286 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2287 * else return true
2288 */
2289bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2290{
2291	int vf_idx;
2292
2293	ice_for_each_vf(pf, vf_idx) {
2294		struct ice_vf *vf = &pf->vf[vf_idx];
2295
2296		/* found a VF that has promiscuous mode configured */
2297		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2298		    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2299			return true;
2300	}
2301
2302	return false;
2303}
2304
2305/**
2306 * ice_vc_cfg_promiscuous_mode_msg
2307 * @vf: pointer to the VF info
2308 * @msg: pointer to the msg buffer
2309 *
2310 * called from the VF to configure VF VSIs promiscuous mode
2311 */
2312static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2313{
2314	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2315	struct virtchnl_promisc_info *info =
2316	    (struct virtchnl_promisc_info *)msg;
2317	struct ice_pf *pf = vf->pf;
2318	struct ice_vsi *vsi;
2319	struct device *dev;
2320	bool rm_promisc;
2321	int ret = 0;
2322
2323	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2324		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2325		goto error_param;
2326	}
2327
2328	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2329		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2330		goto error_param;
2331	}
2332
2333	vsi = pf->vsi[vf->lan_vsi_idx];
2334	if (!vsi) {
2335		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2336		goto error_param;
2337	}
2338
2339	dev = ice_pf_to_dev(pf);
2340	if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2341		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2342			vf->vf_id);
2343		/* Leave v_ret alone, lie to the VF on purpose. */
2344		goto error_param;
2345	}
2346
2347	rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2348		!(info->flags & FLAG_VF_MULTICAST_PROMISC);
2349
2350	if (vsi->num_vlan || vf->port_vlan_info) {
2351		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2352		struct net_device *pf_netdev;
2353
2354		if (!pf_vsi) {
2355			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2356			goto error_param;
2357		}
2358
2359		pf_netdev = pf_vsi->netdev;
2360
2361		ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2362		if (ret) {
2363			dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2364				rm_promisc ? "ON" : "OFF", vf->vf_id,
2365				vsi->vsi_num);
2366			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2367		}
2368
2369		ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2370		if (ret) {
2371			dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2372			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2373			goto error_param;
2374		}
2375	}
2376
2377	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2378		bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2379
2380		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2381			/* only attempt to set the default forwarding VSI if
2382			 * it's not currently set
2383			 */
2384			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2385		else if (!set_dflt_vsi &&
2386			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2387			/* only attempt to free the default forwarding VSI if we
2388			 * are the owner
2389			 */
2390			ret = ice_clear_dflt_vsi(pf->first_sw);
2391
2392		if (ret) {
2393			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2394				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2395			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2396			goto error_param;
2397		}
2398	} else {
2399		enum ice_status status;
2400		u8 promisc_m;
2401
2402		if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2403			if (vf->port_vlan_info || vsi->num_vlan)
2404				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2405			else
2406				promisc_m = ICE_UCAST_PROMISC_BITS;
2407		} else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2408			if (vf->port_vlan_info || vsi->num_vlan)
2409				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2410			else
2411				promisc_m = ICE_MCAST_PROMISC_BITS;
2412		} else {
2413			if (vf->port_vlan_info || vsi->num_vlan)
2414				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2415			else
2416				promisc_m = ICE_UCAST_PROMISC_BITS;
2417		}
2418
2419		/* Configure multicast/unicast with or without VLAN promiscuous
2420		 * mode
2421		 */
2422		status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2423		if (status) {
2424			dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2425				rm_promisc ? "dis" : "en", vf->vf_id,
2426				ice_stat_str(status));
2427			v_ret = ice_err_to_virt_err(status);
2428			goto error_param;
2429		} else {
2430			dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2431				rm_promisc ? "dis" : "en", vf->vf_id);
2432		}
2433	}
2434
2435	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2436		set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2437	else
2438		clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2439
2440	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2441		set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2442	else
2443		clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2444
2445error_param:
2446	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2447				     v_ret, NULL, 0);
2448}
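/* Editor's sketch, not driver code: the promiscuous-mask ladder above in
 * isolation. The parameter and mask names are illustrative stand-ins for the
 * driver's FLAG_VF_*_PROMISC flags and ICE_*_PROMISC_BITS values; the VLAN
 * variants are picked whenever the VF has a port VLAN or any VLAN filters.
 */
#include <stdbool.h>

enum promisc_mask { UCAST, UCAST_VLAN, MCAST, MCAST_VLAN };

static enum promisc_mask
pick_promisc_mask(bool want_uc, bool want_mc, bool has_vlan)
{
	if (want_uc)
		return has_vlan ? UCAST_VLAN : UCAST;
	if (want_mc)
		return has_vlan ? MCAST_VLAN : MCAST;
	/* neither flag set: the driver falls back to the unicast masks */
	return has_vlan ? UCAST_VLAN : UCAST;
}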
2449
2450/**
2451 * ice_vc_get_stats_msg
2452 * @vf: pointer to the VF info
2453 * @msg: pointer to the msg buffer
2454 *
2455 * called from the VF to get VSI stats
2456 */
2457static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2458{
2459	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2460	struct virtchnl_queue_select *vqs =
2461		(struct virtchnl_queue_select *)msg;
2462	struct ice_eth_stats stats = { 0 };
2463	struct ice_pf *pf = vf->pf;
2464	struct ice_vsi *vsi;
2465
2466	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2467		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2468		goto error_param;
2469	}
2470
2471	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2472		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2473		goto error_param;
2474	}
2475
2476	vsi = pf->vsi[vf->lan_vsi_idx];
2477	if (!vsi) {
2478		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2479		goto error_param;
2480	}
2481
2482	ice_update_eth_stats(vsi);
2483
2484	stats = vsi->eth_stats;
2485
2486error_param:
2487	/* send the response to the VF */
2488	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
2489				     (u8 *)&stats, sizeof(stats));
2490}
2491
2492/**
2493 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2494 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2495 *
2496 * Return true on successful validation, else false
2497 */
2498static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2499{
2500	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2501	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2502	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2503		return false;
2504
2505	return true;
2506}
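/* Editor's sketch, not driver code: the same bitmap check in isolation,
 * assuming ICE_MAX_RSS_QS_PER_VF is 16 as in this kernel. At least one bit
 * must be set across the two maps, and each map must stay strictly below
 * BIT(16) = 0x10000, i.e. no queue index of 16 or above may be requested.
 */
#include <stdbool.h>
#include <stdint.h>

static bool vqs_bitmaps_ok(uint32_t rx_queues, uint32_t tx_queues)
{
	const uint32_t limit = 1u << 16;	/* BIT(ICE_MAX_RSS_QS_PER_VF) */

	return (rx_queues || tx_queues) &&
	       rx_queues < limit && tx_queues < limit;
}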
2507
2508/**
2509 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2510 * @vsi: VSI of the VF to configure
2511 * @q_idx: VF queue index used to determine the queue in the PF's space
2512 */
2513static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2514{
2515	struct ice_hw *hw = &vsi->back->hw;
2516	u32 pfq = vsi->txq_map[q_idx];
2517	u32 reg;
2518
2519	reg = rd32(hw, QINT_TQCTL(pfq));
2520
2521	/* MSI-X index 0 in the VF's space is always for the OICR, which means
2522	 * this is most likely a poll mode VF driver, so don't enable an
2523	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2524	 */
2525	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2526		return;
2527
2528	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2529}
2530
2531/**
2532 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2533 * @vsi: VSI of the VF to configure
2534 * @q_idx: VF queue index used to determine the queue in the PF's space
2535 */
2536static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2537{
2538	struct ice_hw *hw = &vsi->back->hw;
2539	u32 pfq = vsi->rxq_map[q_idx];
2540	u32 reg;
2541
2542	reg = rd32(hw, QINT_RQCTL(pfq));
2543
2544	/* MSI-X index 0 in the VF's space is always for the OICR, which means
2545	 * this is most likely a poll mode VF driver, so don't enable an
2546	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2547	 */
2548	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2549		return;
2550
2551	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2552}
2553
2554/**
2555 * ice_vc_ena_qs_msg
2556 * @vf: pointer to the VF info
2557 * @msg: pointer to the msg buffer
2558 *
2559 * called from the VF to enable all or specific queue(s)
2560 */
2561static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2562{
2563	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2564	struct virtchnl_queue_select *vqs =
2565	    (struct virtchnl_queue_select *)msg;
2566	struct ice_pf *pf = vf->pf;
2567	struct ice_vsi *vsi;
2568	unsigned long q_map;
2569	u16 vf_q_id;
2570
2571	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2572		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2573		goto error_param;
2574	}
2575
2576	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2577		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2578		goto error_param;
2579	}
2580
2581	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2582		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2583		goto error_param;
2584	}
2585
2586	vsi = pf->vsi[vf->lan_vsi_idx];
2587	if (!vsi) {
2588		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2589		goto error_param;
2590	}
2591
2592	/* Enable only Rx rings, Tx rings were enabled by the FW when the
2593	 * Tx queue group list was configured and the context bits were
2594	 * programmed using ice_vsi_cfg_txqs
2595	 */
2596	q_map = vqs->rx_queues;
2597	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2598		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2599			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2600			goto error_param;
2601		}
2602
2603		/* Skip queue if enabled */
2604		if (test_bit(vf_q_id, vf->rxq_ena))
2605			continue;
2606
2607		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
2608			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2609				vf_q_id, vsi->vsi_num);
2610			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2611			goto error_param;
2612		}
2613
2614		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
2615		set_bit(vf_q_id, vf->rxq_ena);
2616	}
2617
2618	vsi = pf->vsi[vf->lan_vsi_idx];
2619	q_map = vqs->tx_queues;
2620	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2621		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2622			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2623			goto error_param;
2624		}
2625
2626		/* Skip queue if enabled */
2627		if (test_bit(vf_q_id, vf->txq_ena))
2628			continue;
2629
2630		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
2631		set_bit(vf_q_id, vf->txq_ena);
2632	}
2633
2634	/* Set flag to indicate that queues are enabled */
2635	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2636		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2637
2638error_param:
2639	/* send the response to the VF */
2640	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2641				     NULL, 0);
2642}
2643
2644/**
2645 * ice_vc_dis_qs_msg
2646 * @vf: pointer to the VF info
2647 * @msg: pointer to the msg buffer
2648 *
2649 * called from the VF to disable all or specific
2650 * queue(s)
2651 */
2652static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2653{
2654	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2655	struct virtchnl_queue_select *vqs =
2656	    (struct virtchnl_queue_select *)msg;
2657	struct ice_pf *pf = vf->pf;
2658	struct ice_vsi *vsi;
2659	unsigned long q_map;
2660	u16 vf_q_id;
2661
2662	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2663	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2664		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2665		goto error_param;
2666	}
2667
2668	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2669		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2670		goto error_param;
2671	}
2672
2673	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2674		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2675		goto error_param;
2676	}
2677
2678	vsi = pf->vsi[vf->lan_vsi_idx];
2679	if (!vsi) {
2680		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2681		goto error_param;
2682	}
2683
2684	if (vqs->tx_queues) {
2685		q_map = vqs->tx_queues;
2686
2687		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2688			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2689			struct ice_txq_meta txq_meta = { 0 };
2690
2691			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2692				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2693				goto error_param;
2694			}
2695
2696			/* Skip queue if not enabled */
2697			if (!test_bit(vf_q_id, vf->txq_ena))
2698				continue;
2699
2700			ice_fill_txq_meta(vsi, ring, &txq_meta);
2701
2702			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2703						 ring, &txq_meta)) {
2704				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2705					vf_q_id, vsi->vsi_num);
2706				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2707				goto error_param;
2708			}
2709
2710			/* Clear enabled queues flag */
2711			clear_bit(vf_q_id, vf->txq_ena);
2712		}
2713	}
2714
2715	q_map = vqs->rx_queues;
2716	/* speed up Rx queue disable by batching them if possible */
2717	if (q_map &&
2718	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
2719		if (ice_vsi_stop_all_rx_rings(vsi)) {
2720			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2721				vsi->vsi_num);
2722			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2723			goto error_param;
2724		}
2725
2726		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
2727	} else if (q_map) {
2728		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2729			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2730				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2731				goto error_param;
2732			}
2733
2734			/* Skip queue if not enabled */
2735			if (!test_bit(vf_q_id, vf->rxq_ena))
2736				continue;
2737
2738			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2739						     true)) {
2740				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2741					vf_q_id, vsi->vsi_num);
2742				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2743				goto error_param;
2744			}
2745
2746			/* Clear enabled queues flag */
2747			clear_bit(vf_q_id, vf->rxq_ena);
2748		}
2749	}
2750
2751	/* Clear enabled queues flag */
2752	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
2753		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2754
2755error_param:
2756	/* send the response to the VF */
2757	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2758				     NULL, 0);
2759}
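/* Editor's note: a worked example of the batching above. If the VF enabled
 * Rx queues 0-3 (rxq_ena == 0xf) and now requests rx_queues == 0xf, the maps
 * compare equal and a single ice_vsi_stop_all_rx_rings() call replaces four
 * ice_vsi_ctrl_one_rx_ring() calls; any partial map falls back to the loop.
 */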
2760
2761/**
2762 * ice_cfg_interrupt
2763 * @vf: pointer to the VF info
2764 * @vsi: the VSI being configured
2765 * @vector_id: vector ID
2766 * @map: vector map for mapping vectors to queues
2767 * @q_vector: structure for interrupt vector
2768 * configure the IRQ to queue map
2769 */
2770static int
2771ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2772		  struct virtchnl_vector_map *map,
2773		  struct ice_q_vector *q_vector)
2774{
2775	u16 vsi_q_id, vsi_q_id_idx;
2776	unsigned long qmap;
2777
2778	q_vector->num_ring_rx = 0;
2779	q_vector->num_ring_tx = 0;
2780
2781	qmap = map->rxq_map;
2782	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2783		vsi_q_id = vsi_q_id_idx;
2784
2785		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2786			return VIRTCHNL_STATUS_ERR_PARAM;
2787
2788		q_vector->num_ring_rx++;
2789		q_vector->rx.itr_idx = map->rxitr_idx;
2790		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2791		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2792				      q_vector->rx.itr_idx);
2793	}
2794
2795	qmap = map->txq_map;
2796	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2797		vsi_q_id = vsi_q_id_idx;
2798
2799		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2800			return VIRTCHNL_STATUS_ERR_PARAM;
2801
2802		q_vector->num_ring_tx++;
2803		q_vector->tx.itr_idx = map->txitr_idx;
2804		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2805		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2806				      q_vector->tx.itr_idx);
2807	}
2808
2809	return VIRTCHNL_STATUS_SUCCESS;
2810}
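/* Editor's note: a worked example of the mapping above. If the VF sends
 * map->rxq_map = 0x5 and map->txq_map = 0x1 for one vector, Rx queues 0 and
 * 2 and Tx queue 0 are attached to that vector, leaving num_ring_rx = 2 and
 * num_ring_tx = 1, with each ring inheriting the ITR index from the map.
 */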
2811
2812/**
2813 * ice_vc_cfg_irq_map_msg
2814 * @vf: pointer to the VF info
2815 * @msg: pointer to the msg buffer
2816 *
2817 * called from the VF to configure the IRQ to queue map
2818 */
2819static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2820{
2821	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2822	u16 num_q_vectors_mapped, vsi_id, vector_id;
2823	struct virtchnl_irq_map_info *irqmap_info;
2824	struct virtchnl_vector_map *map;
2825	struct ice_pf *pf = vf->pf;
2826	struct ice_vsi *vsi;
2827	int i;
2828
2829	irqmap_info = (struct virtchnl_irq_map_info *)msg;
2830	num_q_vectors_mapped = irqmap_info->num_vectors;
2831
2832	/* Check to make sure number of VF vectors mapped is not greater than
2833	 * number of VF vectors originally allocated, and check that
2834	 * there is actually at least a single VF queue vector mapped
2835	 */
2836	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2837	    pf->num_msix_per_vf < num_q_vectors_mapped ||
2838	    !num_q_vectors_mapped) {
2839		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2840		goto error_param;
2841	}
2842
2843	vsi = pf->vsi[vf->lan_vsi_idx];
2844	if (!vsi) {
2845		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2846		goto error_param;
2847	}
2848
2849	for (i = 0; i < num_q_vectors_mapped; i++) {
2850		struct ice_q_vector *q_vector;
2851
2852		map = &irqmap_info->vecmap[i];
2853
2854		vector_id = map->vector_id;
2855		vsi_id = map->vsi_id;
2856		/* vector_id is always 0-based for each VF, and can never be
2857		 * larger than or equal to the max allowed interrupts per VF
2858		 */
2859		if (!(vector_id < pf->num_msix_per_vf) ||
2860		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2861		    (!vector_id && (map->rxq_map || map->txq_map))) {
2862			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2863			goto error_param;
2864		}
2865
2866		/* No need to map VF miscellaneous or rogue vector */
2867		if (!vector_id)
2868			continue;
2869
2870		/* Subtract the non-queue vector from the vector_id passed by
2871		 * the VF to get the actual VSI queue vector array index
2872		 */
2873		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2874		if (!q_vector) {
2875			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2876			goto error_param;
2877		}
2878
2879		/* look out for an invalid queue index */
2880		v_ret = (enum virtchnl_status_code)
2881			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2882		if (v_ret)
2883			goto error_param;
2884	}
2885
2886error_param:
2887	/* send the response to the VF */
2888	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2889				     NULL, 0);
2890}
2891
2892/**
2893 * ice_vc_cfg_qs_msg
2894 * @vf: pointer to the VF info
2895 * @msg: pointer to the msg buffer
2896 *
2897 * called from the VF to configure the Rx/Tx queues
2898 */
2899static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2900{
2901	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2902	struct virtchnl_vsi_queue_config_info *qci =
2903	    (struct virtchnl_vsi_queue_config_info *)msg;
2904	struct virtchnl_queue_pair_info *qpi;
2905	u16 num_rxq = 0, num_txq = 0;
2906	struct ice_pf *pf = vf->pf;
2907	struct ice_vsi *vsi;
2908	int i;
2909
2910	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2911		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2912		goto error_param;
2913	}
2914
2915	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2916		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2917		goto error_param;
2918	}
2919
2920	vsi = pf->vsi[vf->lan_vsi_idx];
2921	if (!vsi) {
2922		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2923		goto error_param;
2924	}
2925
2926	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2927	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2928		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2929			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2930		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2931		goto error_param;
2932	}
2933
2934	for (i = 0; i < qci->num_queue_pairs; i++) {
2935		qpi = &qci->qpair[i];
2936		if (qpi->txq.vsi_id != qci->vsi_id ||
2937		    qpi->rxq.vsi_id != qci->vsi_id ||
2938		    qpi->rxq.queue_id != qpi->txq.queue_id ||
2939		    qpi->txq.headwb_enabled ||
2940		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2941		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2942		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2943			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2944			goto error_param;
2945		}
2946		/* copy Tx queue info from VF into VSI */
2947		if (qpi->txq.ring_len > 0) {
2948			num_txq++;
2949			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2950			vsi->tx_rings[i]->count = qpi->txq.ring_len;
2951		}
2952
2953		/* copy Rx queue info from VF into VSI */
2954		if (qpi->rxq.ring_len > 0) {
2955			num_rxq++;
2956			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2957			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2958
2959			if (qpi->rxq.databuffer_size != 0 &&
2960			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2961			     qpi->rxq.databuffer_size < 1024)) {
2962				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2963				goto error_param;
2964			}
2965			vsi->rx_buf_len = qpi->rxq.databuffer_size;
2966			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2967			if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2968			    qpi->rxq.max_pkt_size < 64) {
2969				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2970				goto error_param;
2971			}
2972		}
2973
2974		vsi->max_frame = qpi->rxq.max_pkt_size;
2975	}
2976
2977	/* VF can request to configure fewer than the allocated or default
2978	 * number of queues, so update the VSI with the new counts
2979	 */
2980	vsi->num_txq = num_txq;
2981	vsi->num_rxq = num_rxq;
2982	/* All queues of VF VSI are in TC 0 */
2983	vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2984	vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
2985
2986	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2987		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2988
2989error_param:
2990	/* send the response to the VF */
2991	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2992				     NULL, 0);
2993}
2994
2995/**
2996 * ice_is_vf_trusted
2997 * @vf: pointer to the VF info
2998 */
2999static bool ice_is_vf_trusted(struct ice_vf *vf)
3000{
3001	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3002}
3003
3004/**
3005 * ice_can_vf_change_mac
3006 * @vf: pointer to the VF info
3007 *
3008 * Return true if the VF is allowed to change its MAC filters, false otherwise
3009 */
3010static bool ice_can_vf_change_mac(struct ice_vf *vf)
3011{
3012	/* If the VF MAC address has been set administratively (via the
3013	 * ndo_set_vf_mac command), then deny permission to the VF to
3014	 * add/delete unicast MAC addresses, unless the VF is trusted
3015	 */
3016	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3017		return false;
3018
3019	return true;
3020}
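/* Editor's note: in practice, once the host administrator pins a MAC
 * (e.g. "ip link set <pf> vf 0 mac 00:11:22:33:44:55" sets pf_set_mac), an
 * untrusted VF's VIRTCHNL_OP_ADD_ETH_ADDR for another unicast MAC fails
 * with -EPERM below; marking the VF trusted lifts the restriction.
 */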
3021
3022/**
3023 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3024 * @vf: pointer to the VF info
3025 * @vsi: pointer to the VF's VSI
3026 * @mac_addr: MAC address to add
3027 */
3028static int
3029ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3030{
3031	struct device *dev = ice_pf_to_dev(vf->pf);
3032	enum ice_status status;
3033
3034	/* default unicast MAC already added */
3035	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3036		return 0;
3037
3038	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3039		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3040		return -EPERM;
3041	}
3042
3043	status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3044	if (status == ICE_ERR_ALREADY_EXISTS) {
3045		dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3046			vf->vf_id);
3047		return -EEXIST;
3048	} else if (status) {
3049		dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3050			mac_addr, vf->vf_id, ice_stat_str(status));
3051		return -EIO;
3052	}
3053
3054	/* Set the default LAN address to the latest unicast MAC address added
3055	 * by the VF. The default LAN address is reported by the PF via
3056	 * ndo_get_vf_config.
3057	 */
3058	if (is_unicast_ether_addr(mac_addr))
3059		ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3060
3061	vf->num_mac++;
3062
3063	return 0;
3064}
3065
3066/**
3067 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3068 * @vf: pointer to the VF info
3069 * @vsi: pointer to the VF's VSI
3070 * @mac_addr: MAC address to delete
3071 */
3072static int
3073ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3074{
3075	struct device *dev = ice_pf_to_dev(vf->pf);
3076	enum ice_status status;
3077
3078	if (!ice_can_vf_change_mac(vf) &&
3079	    ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3080		return 0;
3081
3082	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3083	if (status == ICE_ERR_DOES_NOT_EXIST) {
3084		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3085			vf->vf_id);
3086		return -ENOENT;
3087	} else if (status) {
3088		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3089			mac_addr, vf->vf_id, ice_stat_str(status));
3090		return -EIO;
3091	}
3092
3093	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3094		eth_zero_addr(vf->dflt_lan_addr.addr);
3095
3096	vf->num_mac--;
3097
3098	return 0;
3099}
3100
3101/**
3102 * ice_vc_handle_mac_addr_msg
3103 * @vf: pointer to the VF info
3104 * @msg: pointer to the msg buffer
3105 * @set: true if MAC filters are being set, false otherwise
3106 *
3107 * add or remove guest MAC address filters
3108 */
3109static int
3110ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3111{
3112	int (*ice_vc_cfg_mac)
3113		(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3114	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3115	struct virtchnl_ether_addr_list *al =
3116	    (struct virtchnl_ether_addr_list *)msg;
3117	struct ice_pf *pf = vf->pf;
3118	enum virtchnl_ops vc_op;
3119	struct ice_vsi *vsi;
3120	int i;
3121
3122	if (set) {
3123		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3124		ice_vc_cfg_mac = ice_vc_add_mac_addr;
3125	} else {
3126		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3127		ice_vc_cfg_mac = ice_vc_del_mac_addr;
3128	}
3129
3130	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3131	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3132		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3133		goto handle_mac_exit;
3134	}
3135
3136	/* If this VF is not privileged, then we can't add more than a
3137	 * limited number of addresses. Check to make sure that the
3138	 * additions do not push us over the limit.
3139	 */
3140	if (set && !ice_is_vf_trusted(vf) &&
3141	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3142		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more MAC filters\n",
3143			vf->vf_id);
3144		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3145		goto handle_mac_exit;
3146	}
3147
3148	vsi = pf->vsi[vf->lan_vsi_idx];
3149	if (!vsi) {
3150		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3151		goto handle_mac_exit;
3152	}
3153
3154	for (i = 0; i < al->num_elements; i++) {
3155		u8 *mac_addr = al->list[i].addr;
3156		int result;
3157
3158		if (is_broadcast_ether_addr(mac_addr) ||
3159		    is_zero_ether_addr(mac_addr))
3160			continue;
3161
3162		result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3163		if (result == -EEXIST || result == -ENOENT) {
3164			continue;
3165		} else if (result) {
3166			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3167			goto handle_mac_exit;
3168		}
3169	}
3170
3171handle_mac_exit:
3172	/* send the response to the VF */
3173	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3174}
3175
3176/**
3177 * ice_vc_add_mac_addr_msg
3178 * @vf: pointer to the VF info
3179 * @msg: pointer to the msg buffer
3180 *
3181 * add guest MAC address filter
3182 */
3183static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3184{
3185	return ice_vc_handle_mac_addr_msg(vf, msg, true);
3186}
3187
3188/**
3189 * ice_vc_del_mac_addr_msg
3190 * @vf: pointer to the VF info
3191 * @msg: pointer to the msg buffer
3192 *
3193 * remove guest MAC address filter
3194 */
3195static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3196{
3197	return ice_vc_handle_mac_addr_msg(vf, msg, false);
3198}
3199
3200/**
3201 * ice_vc_request_qs_msg
3202 * @vf: pointer to the VF info
3203 * @msg: pointer to the msg buffer
3204 *
3205 * VFs get a default number of queues but can use this message to request a
3206 * different number. If the request is successful, PF will reset the VF and
3207 * return 0. If unsuccessful, PF will send message informing VF of number of
3208 * available queue pairs via virtchnl message response to VF.
3209 */
3210static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3211{
3212	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3213	struct virtchnl_vf_res_request *vfres =
3214		(struct virtchnl_vf_res_request *)msg;
3215	u16 req_queues = vfres->num_queue_pairs;
3216	struct ice_pf *pf = vf->pf;
3217	u16 max_allowed_vf_queues;
3218	u16 tx_rx_queue_left;
3219	struct device *dev;
3220	u16 cur_queues;
3221
3222	dev = ice_pf_to_dev(pf);
3223	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3224		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3225		goto error_param;
3226	}
3227
3228	cur_queues = vf->num_vf_qs;
3229	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3230				 ice_get_avail_rxq_count(pf));
3231	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3232	if (!req_queues) {
3233		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3234			vf->vf_id);
3235	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3236		dev_err(dev, "VF %d tried to request more than %d queues.\n",
3237			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3238		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3239	} else if (req_queues > cur_queues &&
3240		   req_queues - cur_queues > tx_rx_queue_left) {
3241		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3242			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3243		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3244					       ICE_MAX_RSS_QS_PER_VF);
3245	} else {
3246		/* request is successful, so reset the VF */
3247		vf->num_req_qs = req_queues;
3248		ice_vc_reset_vf(vf);
3249		dev_info(dev, "VF %d granted request of %u queues.\n",
3250			 vf->vf_id, req_queues);
3251		return 0;
3252	}
3253
3254error_param:
3255	/* send the response to the VF */
3256	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3257				     v_ret, (u8 *)vfres, sizeof(*vfres));
3258}
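/* Editor's note: a worked example of the clamping above. With cur_queues =
 * 4 and tx_rx_queue_left = 2, a request for 8 pairs falls short by 8 - 4 =
 * 4 > 2, so the PF replies with min(4 + 2, ICE_MAX_RSS_QS_PER_VF) = 6 queue
 * pairs instead of resetting the VF.
 */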
3259
3260/**
3261 * ice_set_vf_port_vlan
3262 * @netdev: network interface device structure
3263 * @vf_id: VF identifier
3264 * @vlan_id: VLAN ID being set
3265 * @qos: priority setting
3266 * @vlan_proto: VLAN protocol
3267 *
3268 * program VF Port VLAN ID and/or QoS
3269 */
3270int
3271ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3272		     __be16 vlan_proto)
3273{
3274	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3275	struct device *dev;
3276	struct ice_vf *vf;
3277	u16 vlanprio;
3278	int ret;
3279
3280	dev = ice_pf_to_dev(pf);
3281	if (ice_validate_vf_id(pf, vf_id))
3282		return -EINVAL;
3283
3284	if (vlan_id >= VLAN_N_VID || qos > 7) {
3285		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3286			vf_id, vlan_id, qos);
3287		return -EINVAL;
3288	}
3289
3290	if (vlan_proto != htons(ETH_P_8021Q)) {
3291		dev_err(dev, "VF VLAN protocol is not supported\n");
3292		return -EPROTONOSUPPORT;
3293	}
3294
3295	vf = &pf->vf[vf_id];
3296	ret = ice_check_vf_ready_for_cfg(vf);
3297	if (ret)
3298		return ret;
3299
3300	vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3301
3302	if (vf->port_vlan_info == vlanprio) {
3303		/* duplicate request, so just return success */
3304		dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3305		return 0;
3306	}
3307
3308	vf->port_vlan_info = vlanprio;
3309
3310	if (vf->port_vlan_info)
3311		dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3312			 vlan_id, qos, vf_id);
3313	else
3314		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3315
3316	ice_vc_reset_vf(vf);
3317
3318	return 0;
3319}
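/* Editor's sketch, not driver code: how vlanprio above packs the VLAN ID and
 * QoS, assuming the standard VLAN_PRIO_SHIFT of 13 from <linux/if_vlan.h>.
 * For vlan_id = 100 (0x064) and qos = 5: 0x064 | (5 << 13) = 0xa064.
 */
#include <stdint.h>

static uint16_t pack_vlanprio(uint16_t vlan_id, uint8_t qos)
{
	return vlan_id | ((uint16_t)qos << 13);	/* 13 == VLAN_PRIO_SHIFT */
}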
3320
3321/**
3322 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3323 * @caps: VF driver negotiated capabilities
3324 *
3325 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3326 */
3327static bool ice_vf_vlan_offload_ena(u32 caps)
3328{
3329	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3330}
3331
3332/**
3333 * ice_vc_process_vlan_msg
3334 * @vf: pointer to the VF info
3335 * @msg: pointer to the msg buffer
3336 * @add_v: Add VLAN if true, otherwise delete VLAN
3337 *
3338 * Process virtchnl op to add or remove programmed guest VLAN ID
3339 */
3340static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3341{
3342	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3343	struct virtchnl_vlan_filter_list *vfl =
3344	    (struct virtchnl_vlan_filter_list *)msg;
3345	struct ice_pf *pf = vf->pf;
3346	bool vlan_promisc = false;
3347	struct ice_vsi *vsi;
3348	struct device *dev;
3349	struct ice_hw *hw;
3350	int status = 0;
3351	u8 promisc_m;
3352	int i;
3353
3354	dev = ice_pf_to_dev(pf);
3355	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3356		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3357		goto error_param;
3358	}
3359
3360	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3361		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3362		goto error_param;
3363	}
3364
3365	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3366		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367		goto error_param;
3368	}
3369
3370	for (i = 0; i < vfl->num_elements; i++) {
3371		if (vfl->vlan_id[i] >= VLAN_N_VID) {
3372			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3373			dev_err(dev, "invalid VF VLAN id %d\n",
3374				vfl->vlan_id[i]);
3375			goto error_param;
3376		}
3377	}
3378
3379	hw = &pf->hw;
3380	vsi = pf->vsi[vf->lan_vsi_idx];
3381	if (!vsi) {
3382		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3383		goto error_param;
3384	}
3385
3386	if (add_v && !ice_is_vf_trusted(vf) &&
3387	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3388		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
3389			 vf->vf_id);
3390		/* There is no need to let VF know about being not trusted,
3391		 * so we can just return success message here
3392		 */
3393		goto error_param;
3394	}
3395
3396	if (vsi->info.pvid) {
3397		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3398		goto error_param;
3399	}
3400
3401	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3402	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3403	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3404		vlan_promisc = true;
3405
3406	if (add_v) {
3407		for (i = 0; i < vfl->num_elements; i++) {
3408			u16 vid = vfl->vlan_id[i];
3409
3410			if (!ice_is_vf_trusted(vf) &&
3411			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3412				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
3413					 vf->vf_id);
3414				/* There is no need to let VF know about being
3415				 * not trusted, so we can just return success
3416				 * message here as well.
3417				 */
3418				goto error_param;
3419			}
3420
3421			/* we add VLAN 0 by default for each VF so we can enable
3422			 * Tx VLAN anti-spoof without triggering MDD events so
3423			 * we don't need to add it again here
3424			 */
3425			if (!vid)
3426				continue;
3427
3428			status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3429			if (status) {
3430				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3431				goto error_param;
3432			}
3433
3434			/* Enable VLAN pruning when non-zero VLAN is added */
3435			if (!vlan_promisc && vid &&
3436			    !ice_vsi_is_vlan_pruning_ena(vsi)) {
3437				status = ice_cfg_vlan_pruning(vsi, true, false);
3438				if (status) {
3439					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3440					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
3441						vid, status);
3442					goto error_param;
3443				}
3444			} else if (vlan_promisc) {
3445				/* Enable Ucast/Mcast VLAN promiscuous mode */
3446				promisc_m = ICE_PROMISC_VLAN_TX |
3447					    ICE_PROMISC_VLAN_RX;
3448
3449				status = ice_set_vsi_promisc(hw, vsi->idx,
3450							     promisc_m, vid);
3451				if (status) {
3452					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3453					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
3454						vid, status);
3455				}
3456			}
3457		}
3458	} else {
3459		/* For an untrusted VF, the number of VLAN elements passed to
3460		 * the PF for removal may exceed the number of VLAN filters
3461		 * programmed for that VF, so cap the loop at the number of
3462		 * VLANs actually added earlier with the add VLAN opcode. This
3463		 * avoids removing a VLAN that doesn't exist and sending an
3464		 * erroneous failure message back to the VF
3465		 */
3466		int num_vf_vlan;
3467
3468		num_vf_vlan = vsi->num_vlan;
3469		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3470			u16 vid = vfl->vlan_id[i];
3471
3472			/* we add VLAN 0 by default for each VF so we can enable
3473			 * Tx VLAN anti-spoof without triggering MDD events so
3474			 * we don't want a VIRTCHNL request to remove it
3475			 */
3476			if (!vid)
3477				continue;
3478
3479			/* Make sure ice_vsi_kill_vlan is successful before
3480			 * updating VLAN information
3481			 */
3482			status = ice_vsi_kill_vlan(vsi, vid);
3483			if (status) {
3484				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3485				goto error_param;
3486			}
3487
3488			/* Disable VLAN pruning when only VLAN 0 is left */
3489			if (vsi->num_vlan == 1 &&
3490			    ice_vsi_is_vlan_pruning_ena(vsi))
3491				ice_cfg_vlan_pruning(vsi, false, false);
3492
3493			/* Disable Unicast/Multicast VLAN promiscuous mode */
3494			if (vlan_promisc) {
3495				promisc_m = ICE_PROMISC_VLAN_TX |
3496					    ICE_PROMISC_VLAN_RX;
3497
3498				ice_clear_vsi_promisc(hw, vsi->idx,
3499						      promisc_m, vid);
3500			}
3501		}
3502	}
3503
3504error_param:
3505	/* send the response to the VF */
3506	if (add_v)
3507		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3508					     NULL, 0);
3509	else
3510		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3511					     NULL, 0);
3512}
3513
3514/**
3515 * ice_vc_add_vlan_msg
3516 * @vf: pointer to the VF info
3517 * @msg: pointer to the msg buffer
3518 *
3519 * Add and program guest VLAN ID
3520 */
3521static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3522{
3523	return ice_vc_process_vlan_msg(vf, msg, true);
3524}
3525
3526/**
3527 * ice_vc_remove_vlan_msg
3528 * @vf: pointer to the VF info
3529 * @msg: pointer to the msg buffer
3530 *
3531 * remove programmed guest VLAN ID
3532 */
3533static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3534{
3535	return ice_vc_process_vlan_msg(vf, msg, false);
3536}
3537
3538/**
3539 * ice_vc_ena_vlan_stripping
3540 * @vf: pointer to the VF info
3541 *
3542 * Enable VLAN header stripping for a given VF
3543 */
3544static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3545{
3546	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3547	struct ice_pf *pf = vf->pf;
3548	struct ice_vsi *vsi;
3549
3550	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3551		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3552		goto error_param;
3553	}
3554
3555	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3556		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3557		goto error_param;
3558	}
3559
3560	vsi = pf->vsi[vf->lan_vsi_idx];
3561	if (ice_vsi_manage_vlan_stripping(vsi, true))
3562		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3563
3564error_param:
3565	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3566				     v_ret, NULL, 0);
3567}
3568
3569/**
3570 * ice_vc_dis_vlan_stripping
3571 * @vf: pointer to the VF info
3572 *
3573 * Disable VLAN header stripping for a given VF
3574 */
3575static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3576{
3577	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3578	struct ice_pf *pf = vf->pf;
3579	struct ice_vsi *vsi;
3580
3581	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3582		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3583		goto error_param;
3584	}
3585
3586	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3587		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3588		goto error_param;
3589	}
3590
3591	vsi = pf->vsi[vf->lan_vsi_idx];
3592	if (!vsi) {
3593		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3594		goto error_param;
3595	}
3596
3597	if (ice_vsi_manage_vlan_stripping(vsi, false))
3598		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3599
3600error_param:
3601	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3602				     v_ret, NULL, 0);
3603}
3604
3605/**
3606 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3607 * @vf: VF to enable/disable VLAN stripping for on initialization
3608 *
3609 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
3610 * the flag is cleared then we want to disable stripping. For example, the flag
3611 * will be cleared when port VLANs are configured by the administrator before
3612 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3613 * offloads.
3614 */
3615static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3616{
3617	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3618
3619	if (!vsi)
3620		return -EINVAL;
3621
3622	/* don't modify stripping if port VLAN is configured */
3623	if (vsi->info.pvid)
3624		return 0;
3625
3626	if (ice_vf_vlan_offload_ena(vf->driver_caps))
3627		return ice_vsi_manage_vlan_stripping(vsi, true);
3628	else
3629		return ice_vsi_manage_vlan_stripping(vsi, false);
3630}
3631
3632/**
3633 * ice_vc_process_vf_msg - Process request from VF
3634 * @pf: pointer to the PF structure
3635 * @event: pointer to the AQ event
3636 *
3637 * called from the common asq/arq handler to
3638 * process request from VF
3639 */
3640void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3641{
3642	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3643	s16 vf_id = le16_to_cpu(event->desc.retval);
3644	u16 msglen = event->msg_len;
3645	u8 *msg = event->msg_buf;
3646	struct ice_vf *vf = NULL;
3647	struct device *dev;
3648	int err = 0;
3649
3650	dev = ice_pf_to_dev(pf);
3651	if (ice_validate_vf_id(pf, vf_id)) {
3652		err = -EINVAL;
3653		goto error_handler;
3654	}
3655
3656	vf = &pf->vf[vf_id];
3657
3658	/* Check if VF is disabled. */
3659	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3660		err = -EPERM;
3661		goto error_handler;
3662	}
3663
3664	/* Perform basic checks on the msg */
3665	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3666	if (err) {
3667		if (err == VIRTCHNL_STATUS_ERR_PARAM)
3668			err = -EPERM;
3669		else
3670			err = -EINVAL;
3671	}
3672
3673error_handler:
3674	if (err) {
3675		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3676				      NULL, 0);
3677		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3678			vf_id, v_opcode, msglen, err);
3679		return;
3680	}
3681
3682	switch (v_opcode) {
3683	case VIRTCHNL_OP_VERSION:
3684		err = ice_vc_get_ver_msg(vf, msg);
3685		break;
3686	case VIRTCHNL_OP_GET_VF_RESOURCES:
3687		err = ice_vc_get_vf_res_msg(vf, msg);
3688		if (ice_vf_init_vlan_stripping(vf))
3689			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3690				vf->vf_id);
3691		ice_vc_notify_vf_link_state(vf);
3692		break;
3693	case VIRTCHNL_OP_RESET_VF:
3694		ice_vc_reset_vf_msg(vf);
3695		break;
3696	case VIRTCHNL_OP_ADD_ETH_ADDR:
3697		err = ice_vc_add_mac_addr_msg(vf, msg);
3698		break;
3699	case VIRTCHNL_OP_DEL_ETH_ADDR:
3700		err = ice_vc_del_mac_addr_msg(vf, msg);
3701		break;
3702	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3703		err = ice_vc_cfg_qs_msg(vf, msg);
3704		break;
3705	case VIRTCHNL_OP_ENABLE_QUEUES:
3706		err = ice_vc_ena_qs_msg(vf, msg);
3707		ice_vc_notify_vf_link_state(vf);
3708		break;
3709	case VIRTCHNL_OP_DISABLE_QUEUES:
3710		err = ice_vc_dis_qs_msg(vf, msg);
3711		break;
3712	case VIRTCHNL_OP_REQUEST_QUEUES:
3713		err = ice_vc_request_qs_msg(vf, msg);
3714		break;
3715	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3716		err = ice_vc_cfg_irq_map_msg(vf, msg);
3717		break;
3718	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3719		err = ice_vc_config_rss_key(vf, msg);
3720		break;
3721	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3722		err = ice_vc_config_rss_lut(vf, msg);
3723		break;
3724	case VIRTCHNL_OP_GET_STATS:
3725		err = ice_vc_get_stats_msg(vf, msg);
3726		break;
3727	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3728		err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3729		break;
3730	case VIRTCHNL_OP_ADD_VLAN:
3731		err = ice_vc_add_vlan_msg(vf, msg);
3732		break;
3733	case VIRTCHNL_OP_DEL_VLAN:
3734		err = ice_vc_remove_vlan_msg(vf, msg);
3735		break;
3736	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3737		err = ice_vc_ena_vlan_stripping(vf);
3738		break;
3739	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3740		err = ice_vc_dis_vlan_stripping(vf);
3741		break;
3742	case VIRTCHNL_OP_UNKNOWN:
3743	default:
3744		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3745			vf_id);
3746		err = ice_vc_send_msg_to_vf(vf, v_opcode,
3747					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3748					    NULL, 0);
3749		break;
3750	}
3751	if (err) {
3752		/* Errors from the opcode handlers are only logged here, as
3753		 * the dispatcher must keep servicing pending requests.
3754		 */
3755		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3756			 vf_id, v_opcode, err);
3757	}
3758}
3759
3760/**
3761 * ice_get_vf_cfg
3762 * @netdev: network interface device structure
3763 * @vf_id: VF identifier
3764 * @ivi: VF configuration structure
3765 *
3766 * return VF configuration
3767 */
3768int
3769ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3770{
3771	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3772	struct ice_vf *vf;
3773
3774	if (ice_validate_vf_id(pf, vf_id))
3775		return -EINVAL;
3776
3777	vf = &pf->vf[vf_id];
3778
3779	if (ice_check_vf_init(pf, vf))
3780		return -EBUSY;
3781
3782	ivi->vf = vf_id;
3783	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3784
3785	/* VF configuration for VLAN and applicable QoS */
3786	ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3787	ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3788
3789	ivi->trusted = vf->trusted;
3790	ivi->spoofchk = vf->spoofchk;
3791	if (!vf->link_forced)
3792		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3793	else if (vf->link_up)
3794		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3795	else
3796		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3797	ivi->max_tx_rate = vf->tx_rate;
3798	ivi->min_tx_rate = 0;
3799	return 0;
3800}
3801
3802/**
3803 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3804 * @pf: PF used to reference the switch's rules
3805 * @umac: unicast MAC to compare against existing switch rules
3806 *
3807 * Return true on the first/any match, else return false
3808 */
3809static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3810{
3811	struct ice_sw_recipe *mac_recipe_list =
3812		&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3813	struct ice_fltr_mgmt_list_entry *list_itr;
3814	struct list_head *rule_head;
3815	struct mutex *rule_lock; /* protect MAC filter list access */
3816
3817	rule_head = &mac_recipe_list->filt_rules;
3818	rule_lock = &mac_recipe_list->filt_rule_lock;
3819
3820	mutex_lock(rule_lock);
3821	list_for_each_entry(list_itr, rule_head, list_entry) {
3822		u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3823
3824		if (ether_addr_equal(existing_mac, umac)) {
3825			mutex_unlock(rule_lock);
3826			return true;
3827		}
3828	}
3829
3830	mutex_unlock(rule_lock);
3831
3832	return false;
3833}
3834
3835/**
3836 * ice_set_vf_mac
3837 * @netdev: network interface device structure
3838 * @vf_id: VF identifier
3839 * @mac: MAC address
3840 *
3841 * program VF MAC address
3842 */
3843int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3844{
3845	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3846	struct ice_vf *vf;
3847	int ret;
3848
3849	if (ice_validate_vf_id(pf, vf_id))
3850		return -EINVAL;
3851
3852	if (is_multicast_ether_addr(mac)) {
3853		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3854		return -EINVAL;
3855	}
3856
3857	vf = &pf->vf[vf_id];
3858	/* nothing left to do, unicast MAC already set */
3859	if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3860		return 0;
3861
3862	ret = ice_check_vf_ready_for_cfg(vf);
3863	if (ret)
3864		return ret;
3865
3866	if (ice_unicast_mac_exists(pf, mac)) {
3867		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
3868			   mac, vf_id, mac);
3869		return -EINVAL;
3870	}
3871
3872	/* VF is notified of its new MAC via the PF's response to the
3873	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
3874	 */
3875	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3876	if (is_zero_ether_addr(mac)) {
3877		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
3878		vf->pf_set_mac = false;
3879		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
3880			    vf->vf_id);
3881	} else {
3882		/* PF will add MAC rule for the VF */
3883		vf->pf_set_mac = true;
3884		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
3885			    mac, vf_id);
3886	}
3887
3888	ice_vc_reset_vf(vf);
3889	return 0;
3890}
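
/* Illustrative usage (assuming the standard .ndo_set_vf_mac wiring): a host
 * administrator reaches the handler above with
 *
 *	ip link set dev <pf-netdev> vf 0 mac 00:11:22:33:44:55
 *
 * and clears the pinned address again with an all-zero MAC, which flips
 * pf_set_mac off so the VF driver may request its own address after reset.
 */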

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	/* Check if already trusted */
	if (trusted == vf->trusted)
		return 0;

	vf->trusted = trusted;
	ice_vc_reset_vf(vf);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	return 0;
}
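
/* Illustrative usage: trust is toggled from the host with
 *
 *	ip link set dev <pf-netdev> vf <n> trust on|off
 *
 * The handler resets the VF on every change because trusted-only
 * capabilities (such as promiscuous-mode requests) are renegotiated
 * over virtchnl after the reset.
 */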

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set a VF's link state, irrespective of the physical link status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		return -EINVAL;
	}

	ice_vc_notify_vf_link_state(vf);

	return 0;
}
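
/* Illustrative mapping for the switch above: the three IFLA_VF_LINK_STATE_*
 * values correspond to
 *
 *	ip link set dev <pf-netdev> vf <n> state auto|enable|disable
 *
 * "auto" lets the VF track the physical link again, while the other two
 * force the state reported by ice_vc_notify_vf_link_state().
 */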

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	if (ice_validate_vf_id(pf, vf_id))
		return -EINVAL;

	vf = &pf->vf[vf_id];
	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		return ret;

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

	return 0;
}
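
/* Note on the aggregation above: the VSI keeps per-cast-type counters, so
 * rx_packets/tx_packets are derived as unicast + broadcast + multicast.
 * These values are reported to userspace via the IFLA_VF_STATS netlink
 * attributes (surfaced, e.g., by ip's per-VF statistics output).
 */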

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dflt_lan_addr.addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
			  ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs' malicious driver detect events
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int i;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->last_printed_mdd_jiffies = jiffies;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, i,
				 vf->dflt_lan_addr.addr);
		}
	}
}
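
/* The once-per-second gate above is a common jiffies idiom. A minimal
 * self-contained sketch of the same pattern (names invented; not driver
 * code):
 */
#ifdef ICE_EXAMPLE_SKETCHES	/* not built; illustration only */
static unsigned long example_last_print_jiffies;

static bool example_print_allowed(void)
{
	/* still inside the one-second window since the last print? */
	if (time_is_after_jiffies(example_last_print_jiffies + HZ))
		return false;

	example_last_print_jiffies = jiffies;
	return true;
}
#endif /* ICE_EXAMPLE_SKETCHES */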

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	struct pci_dev *vfdev;
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}
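
/* A note on the VF walk above: pci_get_device() releases the reference on
 * the device it was handed and takes a reference on the device it returns,
 * so the loop needs no explicit pci_dev_put(); when the search wraps to
 * NULL, no reference is left held. The VF device ID is read from the PF's
 * SR-IOV capability (PCI_SRIOV_VF_DID) because VFs share the PF's vendor
 * ID but use their own device ID.
 */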