// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable the VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that the VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + vf->num_msix - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	if (!pf)
		return -EINVAL;

	bitmap_free(pf->sriov_irq_bm);
	pf->sriov_irq_size = 0;
	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));

	mutex_lock(&vfs->table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach(pf, vf);
		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		list_del(&vf->mbx_info.list_entry);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;

	return vsi;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values, while other registers need 0-based values that are PF
 * relative.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + vf->num_msix) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
	      FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
	      VPINT_ALLOC_VALID_M;
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
	      FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
	      VPINT_ALLOC_PCI_VALID_M;
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to the VF's function */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
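
/* Illustrative example of the two index spaces above (numbers assumed, not
 * taken from any specific part): with msix_vector_first_id = 1,
 * vf_base_id = 64, first_vector_idx = 480 and num_msix = 17, the
 * VPINT_ALLOC/VPINT_ALLOC_PCI registers are programmed with the device-based
 * range 481..497, while the GLINT_VECT2FUNC loop walks the PF-based
 * indexes 480..496.
 */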

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
		      FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
		      FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated with the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	if (!vf || !q_vector)
		return -EINVAL;

	/* always add one to account for the OICR being the first MSIX */
	return vf->first_vector_idx + q_vector->v_idx + 1;
}
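
/* Illustrative: with first_vector_idx = 480 (an assumed value), queue vector
 * 0 maps to register index 481; index 480 itself belongs to the VF's
 * OICR/mailbox vector, hence the "+ 1" above.
 */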

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = ice_get_max_used_msix_vector(pf);
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}
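
/* Worked example for the placement above (illustrative values): with
 * num_msix_vectors = 1024, 480 vectors already in use by the PF and
 * num_msix_needed = 544 (e.g. 32 VFs * 17 vectors), the SR-IOV block is
 * placed at sriov_base_vector = 1024 - 544 = 480, exactly at the end of
 * the tracker-owned range.
 */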

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from the common pool. If we allocate fewer
 * VFs, we get more vectors and can enable more queues per VF. Note that this
 * does not grab any vectors from the SW pool already allocated. Also note
 * that all vector counts include one for each VF's miscellaneous interrupt
 * vector (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int vectors_used = ice_get_max_used_msix_vector(pf);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		vectors_used;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}
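
/* Illustrative tier selection (numbers assumed): with 544 MSI-X vectors left
 * for SR-IOV, requesting 32 VFs gives msix_avail_per_vf = 544 / 32 = 17 and
 * selects the 17-vector medium tier, while requesting 128 VFs gives
 * msix_avail_per_vf = 4 and falls back to one of the smaller tiers.
 */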

/**
 * ice_sriov_get_irqs - get irqs for the SR-IOV use case
 * @pf: pointer to PF structure
 * @needed: number of irqs to get
 *
 * Returns the first MSI-X vector index in PF space that the VF should use.
 * This index is later stored in vf->first_vector_idx and used when accessing
 * PF-relative registers such as GLINT_VECT2FUNC and GLINT_DYN_CTL; in the AVF
 * driver it always corresponds to the OICR index.
 *
 * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
 * allocated from the end of the global irq index space, so the first bit in
 * sriov_irq_bm corresponds to the last irq index, and so on. This simplifies
 * growing or shrinking the SRIOV vector region: the vectors always occupy the
 * range from sriov_base_vector up to the last irq index, and
 * sriov_base_vector can be moved when the region is resized.
 */
static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
{
	int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
					     pf->sriov_irq_size, 0, needed, 0);
	/* conversion from number in bitmap to global irq index */
	int index = pf->sriov_irq_size - res - needed;

	if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
		return -ENOENT;

	bitmap_set(pf->sriov_irq_bm, res, needed);
	return index;
}
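
/* Example of the reversed-bitmap arithmetic above (illustrative values):
 * with sriov_irq_size = 1024 and needed = 17, the first free area is found
 * at res = 0, which maps to global irq index 1024 - 0 - 17 = 1007, i.e. the
 * VF takes the last 17 vectors of the device.
 */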

/**
 * ice_sriov_free_irqs - free irqs used by the VF
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure
 */
static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
{
	/* Move back from first vector index to first index in bitmap */
	int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;

	bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
	vf->first_vector_idx = 0;
}
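
/* Inverse of the mapping in ice_sriov_get_irqs(): continuing the example
 * above, a VF with first_vector_idx = 1007 and num_msix = 17 clears bitmap
 * bits starting at 1024 - 1007 - 17 = 0.
 */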

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		return -ENOMEM;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		retval = ice_eswitch_attach(pf, vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
				vf->vf_id, retval);
			ice_vf_vsi_release(vf);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_reset_state - clears VF Reset status register
 * @vf: the VF to configure
 */
static void ice_sriov_clear_reset_state(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;

	/* Clear the reset status register so that the VF immediately sees
	 * that the device is resetting, even if hardware hasn't yet gotten
	 * around to clearing VFGEN_RSTAT for us.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
}

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the VF to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and clean up after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when the reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires the driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_reset_state = ice_sriov_clear_reset_state,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.irq_close = NULL,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct pci_dev *pdev = pf->pdev;
	struct ice_vfs *vfs = &pf->vfs;
	struct pci_dev *vfdev = NULL;
	struct ice_vf *vf;
	u16 vf_pdev_id;
	int err, pos;

	lockdep_assert_held(&vfs->table_lock);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);

	for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during the SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

		do {
			vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
		} while (vfdev && vfdev->physfn != pdev);
		vf->vfdev = vfdev;
		vf->vf_sw_id = pf->first_sw;

		pci_dev_get(vfdev);

		/* set default number of MSI-X */
		vf->num_msix = pf->vfs.num_msix_per;
		vf->num_vf_qs = pf->vfs.num_qps_per;
		ice_vc_set_default_allowlist(vf);

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	/* The refcount decrement done by pci_get_device() inside the loop
	 * does not touch the last iteration's vfdev, so it has to be dropped
	 * manually to balance the pci_dev_get() added within the loop.
	 */
	pci_dev_put(vfdev);

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
	if (!pf->sriov_irq_bm)
		return -ENOMEM;
	pf->sriov_irq_size = total_vectors;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ice_eswitch_reserve_cp_queues(pf, num_vfs);
	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	bitmap_free(pf->sriov_irq_bm);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (!num_vfs) {
		ice_free_vfs(pf);
		return 0;
	}

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_get_vf_total_msix - return total number of MSI-X available to VFs
 * @pdev: pointer to pci_dev struct
 *
 * This function is called via the sriov sysfs ops
 */
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
}

static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
{
	if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
		return -ENOMEM;

	pf->sriov_base_vector -= move;
	return 0;
}
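
/* A positive "move" grows the SR-IOV region downward: e.g. (illustrative
 * values) a base vector of 480 with move = 16 becomes 464, which is only
 * accepted if it does not dip below the highest vector index already used
 * by the PF.
 */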

static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
{
	u16 vf_ids[ICE_MAX_SRIOV_VFS];
	struct ice_vf *tmp_vf;
	int to_remap = 0, bkt;

	/* For better irq usage, try to remap the irqs of VFs
	 * that aren't running yet
	 */
	ice_for_each_vf(pf, bkt, tmp_vf) {
		/* skip the VF which is changing the number of MSI-X */
		if (restricted_id == tmp_vf->vf_id ||
		    test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states))
			continue;

		ice_dis_vf_mappings(tmp_vf);
		ice_sriov_free_irqs(pf, tmp_vf);

		vf_ids[to_remap] = tmp_vf->vf_id;
		to_remap += 1;
	}

	for (int i = 0; i < to_remap; i++) {
		tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
		if (!tmp_vf)
			continue;

		tmp_vf->first_vector_idx =
			ice_sriov_get_irqs(pf, tmp_vf->num_msix);
		/* there is no need to rebuild the VSI as we are only changing
		 * the vector indexes, not the amount of MSI-X or queues
		 */
		ice_ena_vf_mappings(tmp_vf);
		ice_put_vf(tmp_vf);
	}
}

/**
 * ice_sriov_set_msix_vec_count - set MSI-X vector count on a VF
 * @vf_dev: pointer to pci_dev struct of VF device
 * @msix_vec_count: new value for MSI-X amount on this VF
 *
 * Set requested MSI-X, queues and registers for @vf_dev.
 *
 * First do some sanity checks, e.g. that there are any VFs and that the new
 * value is valid. Then disable the old mapping (MSI-X and queue registers),
 * change the MSI-X and queue counts, rebuild the VSI and enable the new
 * mapping.
 *
 * If possible (no driver bound to the VF), also try to remap the other VFs
 * to linearize the irq register usage.
 */
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
	struct pci_dev *pdev = pci_physfn(vf_dev);
	struct ice_pf *pf = pci_get_drvdata(pdev);
	u16 prev_msix, prev_queues, queues;
	bool needs_rebuild = false;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int id;

	if (!ice_get_num_vfs(pf))
		return -ENOENT;

	if (!msix_vec_count)
		return 0;

	queues = msix_vec_count;
	/* add 1 MSI-X for OICR */
	msix_vec_count += 1;

	if (queues > min(ice_get_avail_txq_count(pf),
			 ice_get_avail_rxq_count(pf)))
		return -EINVAL;

	if (msix_vec_count < ICE_MIN_INTR_PER_VF)
		return -EINVAL;

	/* Translate the PCI VF function number to the VF ID */
	for (id = 0; id < pci_num_vf(pdev); id++) {
		if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
			break;
	}

	if (id == pci_num_vf(pdev))
		return -ENOENT;

	vf = ice_get_vf_by_id(pf, id);

	if (!vf)
		return -ENOENT;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		/* drop the reference taken by ice_get_vf_by_id() */
		ice_put_vf(vf);
		return -ENOENT;
	}

	prev_msix = vf->num_msix;
	prev_queues = vf->num_vf_qs;

	if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
		ice_put_vf(vf);
		return -ENOSPC;
	}

	ice_dis_vf_mappings(vf);
	ice_sriov_free_irqs(pf, vf);

	/* Remap all VFs besides the one being configured */
	ice_sriov_remap_vectors(pf, vf->vf_id);

	vf->num_msix = msix_vec_count;
	vf->num_vf_qs = queues;
	vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		goto unroll;

	if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
		/* Try to rebuild with previous values */
		needs_rebuild = true;
		goto unroll;
	}

	dev_info(ice_pf_to_dev(pf),
		 "Changing VF %d resources to %d vectors and %d queues\n",
		 vf->vf_id, vf->num_msix, vf->num_vf_qs);

	ice_ena_vf_mappings(vf);
	ice_put_vf(vf);

	return 0;

unroll:
	dev_info(ice_pf_to_dev(pf),
		 "Can't set %d vectors on VF %d, falling back to %d\n",
		 vf->num_msix, vf->vf_id, prev_msix);

	vf->num_msix = prev_msix;
	vf->num_vf_qs = prev_queues;
	vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		return -EINVAL;

	if (needs_rebuild) {
		ice_vf_reconfig_vsi(vf);
		ice_vf_init_host_cfg(vf, vsi);
	}

	ice_ena_vf_mappings(vf);
	ice_put_vf(vf);

	return -EINVAL;
}
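
/* The callback above is wired up through the PF driver's
 * sriov_set_msix_vec_count op; assuming the standard PCI sysfs layout, it is
 * reached by something like:
 *	echo 17 > /sys/bus/pci/devices/<VF BDF>/sriov_vf_msix_count
 */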

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read the GLGEN_VFLRSTAT register to find out which VFs have
		 * been reset
		 */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
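
/* GLGEN_VFLRSTAT is a bit-per-VF array of 32-bit registers, so e.g. an
 * absolute VF ID of 37 maps to reg_idx = 37 / 32 = 1 and
 * bit_idx = 37 % 32 = 5 (illustrative values).
 */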

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * Return a pointer to the VF that owns the pfq, or NULL if no VF owns it.
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}
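
/* Illustrative: with rxq_first_id = 128 (an assumed capability value),
 * global Rx queue 130 converts to PF space queue 2.
 */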

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
 * a reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg - return VF configuration
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * Return the VF configuration in @ivi.
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac - program VF MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * Program the VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr, mac);
	ether_addr_copy(vf->hw_lan_addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust - enable or disable a given VF as trusted
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		/* drop the reference taken by ice_get_vf_by_id() */
		ice_put_vf(vf);
		return -EOPNOTSUPP;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state - set VF's link state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of the
 * total min_tx_rate based on the current link speed and all other VFs'
 * configured min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}
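
/* Illustrative arithmetic for the check above: on a 10000 Mbps link with the
 * other VFs' guarantees summing to 9000 Mbps, a new request of 2000 Mbps
 * would oversubscribe the link by 9000 + 2000 - 10000 = 1000 Mbps and is
 * rejected.
 */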

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan - program VF port VLAN
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * Program the VF Port VLAN ID and/or QoS
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
			  ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pf: pointer to the PF structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
{
	struct ice_vf *vf;
	u32 bkt;

	ice_for_each_vf(pf, bkt, vf)
		pci_restore_msi_state(vf->vfdev);
}