v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2022, Intel Corporation. */
   3
   4#include "ice_vf_lib_private.h"
   5#include "ice.h"
   6#include "ice_lib.h"
   7#include "ice_fltr.h"
   8#include "ice_virtchnl_allowlist.h"
   9
  10/* Public functions which may be accessed by all driver files */
  11
  12/**
  13 * ice_get_vf_by_id - Get pointer to VF by ID
  14 * @pf: the PF private structure
  15 * @vf_id: the VF ID to locate
  16 *
  17 * Locate and return a pointer to the VF structure associated with a given ID.
  18 * Returns NULL if the ID does not have a valid VF structure associated with
  19 * it.
  20 *
  21 * This function takes a reference to the VF, which must be released by
  22 * calling ice_put_vf() once the caller is finished accessing the VF structure
  23 * returned.
  24 */
  25struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
  26{
  27	struct ice_vf *vf;
  28
  29	rcu_read_lock();
  30	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
  31		if (vf->vf_id == vf_id) {
  32			struct ice_vf *found;
  33
  34			if (kref_get_unless_zero(&vf->refcnt))
  35				found = vf;
  36			else
  37				found = NULL;
  38
  39			rcu_read_unlock();
  40			return found;
  41		}
  42	}
  43	rcu_read_unlock();
  44
  45	return NULL;
  46}
  47
  48/**
  49 * ice_release_vf - Release VF associated with a refcount
  50 * @ref: the kref decremented to zero
  51 *
  52 * Callback function for kref_put to release a VF once its reference count has
  53 * hit zero.
  54 */
  55static void ice_release_vf(struct kref *ref)
  56{
  57	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
  58
  59	pci_dev_put(vf->vfdev);
  60
  61	vf->vf_ops->free(vf);
  62}
  63
  64/**
  65 * ice_put_vf - Release a reference to a VF
  66 * @vf: the VF structure to decrease reference count on
  67 *
  68 * Decrease the reference count for a VF, and free the entry if it is no
  69 * longer in use.
  70 *
  71 * This must be called after ice_get_vf_by_id() once the reference to the VF
  72 * structure is no longer used. Otherwise, the VF structure will never be
  73 * freed.
  74 */
  75void ice_put_vf(struct ice_vf *vf)
  76{
  77	kref_put(&vf->refcnt, ice_release_vf);
  78}
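
/* Illustrative lookup/release pattern (a sketch, not part of this file):
 * a caller that obtains a VF through ice_get_vf_by_id() must drop the
 * reference with ice_put_vf() once it is done with the structure, e.g.:
 *
 *	struct ice_vf *vf;
 *
 *	vf = ice_get_vf_by_id(pf, vf_id);
 *	if (!vf)
 *		return;
 *	... access the VF ...
 *	ice_put_vf(vf);
 */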
  79
  80/**
  81 * ice_has_vfs - Return true if the PF has any associated VFs
  82 * @pf: the PF private structure
  83 *
  84 * Return whether or not the PF has any allocated VFs.
  85 *
   86 * Note that this function only reflects whether any VFs exist at the point
  87 * of calling it. It does not guarantee that no more VFs will be added.
  88 */
  89bool ice_has_vfs(struct ice_pf *pf)
  90{
  91	/* A simple check that the hash table is not empty does not require
  92	 * the mutex or rcu_read_lock.
  93	 */
  94	return !hash_empty(pf->vfs.table);
  95}
  96
  97/**
  98 * ice_get_num_vfs - Get number of allocated VFs
  99 * @pf: the PF private structure
 100 *
 101 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
 102 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
 103 * the output of this function.
 104 */
 105u16 ice_get_num_vfs(struct ice_pf *pf)
 106{
 107	struct ice_vf *vf;
 108	unsigned int bkt;
 109	u16 num_vfs = 0;
 110
 111	rcu_read_lock();
 112	ice_for_each_vf_rcu(pf, bkt, vf)
 113		num_vfs++;
 114	rcu_read_unlock();
 115
 116	return num_vfs;
 117}
 118
 119/**
 120 * ice_get_vf_vsi - get VF's VSI based on the stored index
 121 * @vf: VF used to get VSI
 122 */
 123struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
 124{
 125	if (vf->lan_vsi_idx == ICE_NO_VSI)
 126		return NULL;
 127
 128	return vf->pf->vsi[vf->lan_vsi_idx];
 129}
 130
 131/**
  132 * ice_is_vf_disabled - check whether the VF or its parent PF is disabled
 133 * @vf: pointer to the VF info
 134 *
  135 * If the PF has been disabled, there is no need to reset the VF until the PF is
 136 * active again. Similarly, if the VF has been disabled, this means something
 137 * else is resetting the VF, so we shouldn't continue.
 138 *
 139 * Returns true if the caller should consider the VF as disabled whether
 140 * because that single VF is explicitly disabled or because the PF is
 141 * currently disabled.
 142 */
 143bool ice_is_vf_disabled(struct ice_vf *vf)
 144{
 145	struct ice_pf *pf = vf->pf;
 146
 147	return (test_bit(ICE_VF_DIS, pf->state) ||
 148		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
 149}
 150
 151/**
 152 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
  153 * @vf: The VF being reset
 154 *
  155 * The max poll time is ~800ms, which is about the maximum time it takes
 156 * for a VF to be reset and/or a VF driver to be removed.
 157 */
 158static void ice_wait_on_vf_reset(struct ice_vf *vf)
 159{
 160	int i;
 161
 162	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
 163		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
 164			break;
 165		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
 166	}
 167}
 168
 169/**
 170 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 171 * @vf: VF to check if it's ready to be configured/queried
 172 *
 173 * The purpose of this function is to make sure the VF is not in reset, not
 174 * disabled, and initialized so it can be configured and/or queried by a host
 175 * administrator.
 176 */
 177int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
 178{
 179	ice_wait_on_vf_reset(vf);
 180
 181	if (ice_is_vf_disabled(vf))
 182		return -EINVAL;
 183
 184	if (ice_check_vf_init(vf))
 185		return -EBUSY;
 186
 187	return 0;
 188}
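
/* Minimal sketch of a host-side configuration path using this helper
 * (illustrative only; the surrounding code is assumed, not from this file):
 *
 *	err = ice_check_vf_ready_for_cfg(vf);
 *	if (err)
 *		return err;
 *	... VF is initialized and not in reset, apply the requested change ...
 */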
 189
 190/**
 191 * ice_trigger_vf_reset - Reset a VF on HW
 192 * @vf: pointer to the VF structure
 193 * @is_vflr: true if VFLR was issued, false if not
 194 * @is_pfr: true if the reset was triggered due to a previous PFR
 195 *
 196 * Trigger hardware to start a reset for a particular VF. Expects the caller
 197 * to wait the proper amount of time to allow hardware to reset the VF before
 198 * it cleans up and restores VF functionality.
 199 */
 200static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 201{
 202	/* Inform VF that it is no longer active, as a warning */
 203	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 204
 205	/* Disable VF's configuration API during reset. The flag is re-enabled
 206	 * when it's safe again to access VF's VSI.
 207	 */
 208	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 209
 210	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
 211	 * needs to clear them in the case of VFR/VFLR. If this is done for
 212	 * PFR, it can mess up VF resets because the VF driver may already
 213	 * have started cleanup by the time we get here.
 214	 */
 215	if (!is_pfr)
 216		vf->vf_ops->clear_mbx_register(vf);
 217
 218	vf->vf_ops->trigger_reset_register(vf, is_vflr);
 219}
 220
 221static void ice_vf_clear_counters(struct ice_vf *vf)
 222{
 223	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 224
 225	if (vsi)
 226		vsi->num_vlan = 0;
 227
 228	vf->num_mac = 0;
 229	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
 230	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
 231}
 232
 233/**
 234 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 235 * @vf: VF to perform pre VSI rebuild tasks
 236 *
 237 * These tasks are items that don't need to be amortized since they are most
 238 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 239 */
 240static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
 241{
 242	/* Close any IRQ mapping now */
 243	if (vf->vf_ops->irq_close)
 244		vf->vf_ops->irq_close(vf);
 245
 246	ice_vf_clear_counters(vf);
 247	vf->vf_ops->clear_reset_trigger(vf);
 248}
 249
 250/**
 251 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
 252 * @vf: VF to reconfigure the VSI for
 253 *
  254 * This is called when a single VF is being reset (i.e. VFR, VFLR, host VF
 255 * configuration change, etc).
 256 *
 257 * It brings the VSI down and then reconfigures it with the hardware.
 258 */
 259static int ice_vf_reconfig_vsi(struct ice_vf *vf)
 260{
 261	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 262	struct ice_pf *pf = vf->pf;
 263	int err;
 264
 265	if (WARN_ON(!vsi))
 266		return -EINVAL;
 267
 268	vsi->flags = ICE_VSI_FLAG_NO_INIT;
 269
 270	ice_vsi_decfg(vsi);
 271	ice_fltr_remove_all(vsi);
 272
 273	err = ice_vsi_cfg(vsi);
 274	if (err) {
 275		dev_err(ice_pf_to_dev(pf),
 276			"Failed to reconfigure the VF%u's VSI, error %d\n",
 277			vf->vf_id, err);
 278		return err;
 279	}
 280
 281	return 0;
 282}
 283
 284/**
 285 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 286 * @vf: VF to rebuild the VSI for
 287 *
 288 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 289 * host, PFR, CORER, etc.).
 290 *
 291 * It reprograms the VSI configuration back into hardware.
 292 */
 293static int ice_vf_rebuild_vsi(struct ice_vf *vf)
 294{
 295	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 296	struct ice_pf *pf = vf->pf;
 297
 298	if (WARN_ON(!vsi))
 299		return -EINVAL;
 300
 301	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
 302		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
 303			vf->vf_id);
 304		return -EIO;
 305	}
 306	/* vsi->idx will remain the same in this case so don't update
 307	 * vf->lan_vsi_idx
 308	 */
 309	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
 310
 311	return 0;
 312}
 313
 314/**
 315 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 316 * @vf: VF to add MAC filters for
 317 * @vsi: Pointer to VSI
 318 *
 319 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 320 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 321 */
 322static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
 323{
 324	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 325	struct device *dev = ice_pf_to_dev(vf->pf);
 326	int err;
 327
 328	if (ice_vf_is_port_vlan_ena(vf)) {
 329		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
 330		if (err) {
 331			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
 332				vf->vf_id, err);
 333			return err;
 334		}
 335
 336		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
 337	} else {
 338		/* clear possible previous port vlan config */
 339		err = ice_vsi_clear_port_vlan(vsi);
 340		if (err) {
 341			dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
 342				vf->vf_id, err);
 343			return err;
 344		}
 345		err = ice_vsi_add_vlan_zero(vsi);
 346	}
 347
 348	if (err) {
 349		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
 350			ice_vf_is_port_vlan_ena(vf) ?
 351			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
 352		return err;
 353	}
 354
 355	err = vlan_ops->ena_rx_filtering(vsi);
 356	if (err)
 357		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
 358			 vf->vf_id, vsi->idx, err);
 359
 360	return 0;
 361}
 362
 363/**
 364 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 365 * @vf: VF to re-apply the configuration for
 366 *
  367 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 368 * needs to re-apply the host configured Tx rate limiting configuration.
 369 */
 370static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
 371{
 372	struct device *dev = ice_pf_to_dev(vf->pf);
 373	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 374	int err;
 375
 376	if (WARN_ON(!vsi))
 377		return -EINVAL;
 378
 379	if (vf->min_tx_rate) {
 380		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
 381		if (err) {
 382			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
 383				vf->min_tx_rate, vf->vf_id, err);
 384			return err;
 385		}
 386	}
 387
 388	if (vf->max_tx_rate) {
 389		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
 390		if (err) {
 391			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
 392				vf->max_tx_rate, vf->vf_id, err);
 393			return err;
 394		}
 395	}
 396
 397	return 0;
 398}
 399
 400/**
 401 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 402 * @vf: VF to configure trust setting for
 403 */
 404static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
 405{
 406	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
 407}
 408
 409/**
 410 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 411 * @vf: VF to add MAC filters for
 412 *
 413 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 414 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 415 */
 416static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
 417{
 418	struct device *dev = ice_pf_to_dev(vf->pf);
 419	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 420	u8 broadcast[ETH_ALEN];
 421	int status;
 422
 423	if (WARN_ON(!vsi))
 424		return -EINVAL;
 425
 426	if (ice_is_eswitch_mode_switchdev(vf->pf))
 427		return 0;
 428
 429	eth_broadcast_addr(broadcast);
 430	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
 431	if (status) {
 432		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
 433			vf->vf_id, status);
 434		return status;
 435	}
 436
 437	vf->num_mac++;
 438
 439	if (is_valid_ether_addr(vf->hw_lan_addr)) {
 440		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
 441					  ICE_FWD_TO_VSI);
 442		if (status) {
 443			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
 444				&vf->hw_lan_addr[0], vf->vf_id,
 445				status);
 446			return status;
 447		}
 448		vf->num_mac++;
 449
 450		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
 451	}
 452
 453	return 0;
 454}
 455
 456/**
 457 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 458 * @vsi: Pointer to VSI
 459 *
 460 * This function moves VSI into corresponding scheduler aggregator node
 461 * based on cached value of "aggregator node info" per VSI
 462 */
 463static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
 464{
 465	struct ice_pf *pf = vsi->back;
 466	struct device *dev;
 467	int status;
 468
 469	if (!vsi->agg_node)
 470		return;
 471
 472	dev = ice_pf_to_dev(pf);
 473	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
 474		dev_dbg(dev,
 475			"agg_id %u already has reached max_num_vsis %u\n",
 476			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
 477		return;
 478	}
 479
 480	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
 481				     vsi->idx, vsi->tc_cfg.ena_tc);
 482	if (status)
 483		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
 484			vsi->idx, vsi->agg_node->agg_id);
 485	else
 486		vsi->agg_node->num_vsis++;
 487}
 488
 489/**
 490 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 491 * @vf: VF to rebuild host configuration on
 492 */
 493static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
 494{
 495	struct device *dev = ice_pf_to_dev(vf->pf);
 496	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 497
 498	if (WARN_ON(!vsi))
 499		return;
 500
 501	ice_vf_set_host_trust_cfg(vf);
 502
 503	if (ice_vf_rebuild_host_mac_cfg(vf))
 504		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
 505			vf->vf_id);
 506
 507	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
 508		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
 509			vf->vf_id);
 510
 511	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
 512		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
 513			vf->vf_id);
 514
 515	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
 516		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
 517			vf->vf_id);
 518
 519	/* rebuild aggregator node config for main VF VSI */
 520	ice_vf_rebuild_aggregator_node_cfg(vsi);
 521}
 522
 523/**
 524 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 525 * @vf: pointer to the VF structure
 526 */
 527static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
 528{
 529	/* Clear Rx/Tx enabled queues flag */
 530	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
 531	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
 532	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
 533}
 534
 535/**
 536 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 537 * @vf: VF to set in initialized state
 538 *
 539 * After this function the VF will be ready to receive/handle the
 540 * VIRTCHNL_OP_GET_VF_RESOURCES message
 541 */
 542static void ice_vf_set_initialized(struct ice_vf *vf)
 543{
 544	ice_set_vf_state_qs_dis(vf);
 545	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 546	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 547	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
 548	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
 549	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
 550}
 551
 552/**
 553 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 554 * @vf: the VF being reset
 555 *
 556 * Perform reset tasks which must occur after the VSI has been re-created or
 557 * rebuilt during a VF reset.
 558 */
 559static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
 560{
 561	ice_vf_rebuild_host_cfg(vf);
 562	ice_vf_set_initialized(vf);
 563
 564	vf->vf_ops->post_vsi_rebuild(vf);
 565}
 566
 567/**
 568 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
 569 * are in unicast promiscuous mode
 570 * @pf: PF structure for accessing VF(s)
 571 *
 572 * Return false if no VF(s) are in unicast promiscuous mode,
 573 * else return true
 574 */
 575bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
 576{
 577	bool is_vf_promisc = false;
 578	struct ice_vf *vf;
 579	unsigned int bkt;
 580
 581	rcu_read_lock();
 582	ice_for_each_vf_rcu(pf, bkt, vf) {
 583		/* found a VF that has promiscuous mode configured */
 584		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
 585			is_vf_promisc = true;
 586			break;
 587		}
 588	}
 589	rcu_read_unlock();
 590
 591	return is_vf_promisc;
 592}
 593
 594/**
 595 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
 596 * @vf: the VF pointer
 597 * @vsi: the VSI to configure
 598 * @ucast_m: promiscuous mask to apply to unicast
 599 * @mcast_m: promiscuous mask to apply to multicast
 600 *
 601 * Decide which mask should be used for unicast and multicast filter,
 602 * based on presence of VLANs
 603 */
 604void
 605ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
 606			 u8 *ucast_m, u8 *mcast_m)
 607{
 608	if (ice_vf_is_port_vlan_ena(vf) ||
 609	    ice_vsi_has_non_zero_vlans(vsi)) {
 610		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
 611		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
 612	} else {
 613		*mcast_m = ICE_MCAST_PROMISC_BITS;
 614		*ucast_m = ICE_UCAST_PROMISC_BITS;
 615	}
 616}
 617
 618/**
 619 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 620 * @vf: the VF pointer
 621 * @vsi: the VSI to configure
 622 *
 623 * Clear all promiscuous/allmulticast filters for a VF
 624 */
 625static int
 626ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
 627{
 628	struct ice_pf *pf = vf->pf;
 629	u8 ucast_m, mcast_m;
 630	int ret = 0;
 631
 632	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
 633	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
 634		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
 635			if (ice_is_dflt_vsi_in_use(vsi->port_info))
 636				ret = ice_clear_dflt_vsi(vsi);
 637		} else {
 638			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
 639		}
 640
 641		if (ret) {
 642			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
 643		} else {
 644			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 645			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
 646		}
 647	}
 648
 649	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
 650		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
 651		if (ret) {
 652			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
 653		} else {
 654			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 655			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
 656		}
 657	}
 658	return ret;
 659}
 660
 661/**
 662 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
 663 * @vf: the VF to configure
 664 * @vsi: the VF's VSI
 665 * @promisc_m: the promiscuous mode to enable
 666 */
 667int
 668ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
 669{
 670	struct ice_hw *hw = &vsi->back->hw;
 671	int status;
 672
 673	if (ice_vf_is_port_vlan_ena(vf))
 674		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
 675						  ice_vf_get_port_vlan_id(vf));
 676	else if (ice_vsi_has_non_zero_vlans(vsi))
 677		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
 678	else
 679		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
 680
 681	if (status && status != -EEXIST) {
 682		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
 683			vf->vf_id, status);
 684		return status;
 685	}
 686
 687	return 0;
 688}
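
/* Sketch of pairing ice_vf_get_promisc_masks() with this helper when
 * enabling unicast promiscuous mode (illustrative only):
 *
 *	u8 ucast_m, mcast_m;
 *
 *	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
 *	err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
 */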
 689
 690/**
 691 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
 692 * @vf: the VF to configure
 693 * @vsi: the VF's VSI
 694 * @promisc_m: the promiscuous mode to disable
 695 */
 696int
 697ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
 698{
 699	struct ice_hw *hw = &vsi->back->hw;
 700	int status;
 701
 702	if (ice_vf_is_port_vlan_ena(vf))
 703		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 704						    ice_vf_get_port_vlan_id(vf));
 705	else if (ice_vsi_has_non_zero_vlans(vsi))
 706		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
 707	else
 708		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
 709
 710	if (status && status != -ENOENT) {
 711		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
 712			vf->vf_id, status);
 713		return status;
 714	}
 715
 716	return 0;
 717}
 718
 719/**
 720 * ice_reset_vf_mbx_cnt - reset VF mailbox message count
 721 * @vf: pointer to the VF structure
 722 *
 723 * This function clears the VF mailbox message count, and should be called on
 724 * VF reset.
 725 */
 726static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
 727{
 728	struct ice_pf *pf = vf->pf;
 729
 730	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
 731		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
 732	else
 733		ice_mbx_clear_malvf(&vf->mbx_info);
 734}
 735
 736/**
 737 * ice_reset_all_vfs - reset all allocated VFs in one go
 738 * @pf: pointer to the PF structure
 739 *
 740 * Reset all VFs at once, in response to a PF or other device reset.
 741 *
 742 * First, tell the hardware to reset each VF, then do all the waiting in one
 743 * chunk, and finally finish restoring each VF after the wait. This is useful
 744 * during PF routines which need to reset all VFs, as otherwise it must perform
 745 * these resets in a serialized fashion.
 746 */
 747void ice_reset_all_vfs(struct ice_pf *pf)
 748{
 749	struct device *dev = ice_pf_to_dev(pf);
 750	struct ice_hw *hw = &pf->hw;
 751	struct ice_vf *vf;
 752	unsigned int bkt;
 753
 754	/* If we don't have any VFs, then there is nothing to reset */
 755	if (!ice_has_vfs(pf))
 756		return;
 757
 758	mutex_lock(&pf->vfs.table_lock);
 759
 760	/* clear all malicious info if the VFs are getting reset */
 761	ice_for_each_vf(pf, bkt, vf)
 762		ice_reset_vf_mbx_cnt(vf);
 763
 764	/* If VFs have been disabled, there is no need to reset */
 765	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
 766		mutex_unlock(&pf->vfs.table_lock);
 767		return;
 768	}
 769
 770	/* Begin reset on all VFs at once */
 771	ice_for_each_vf(pf, bkt, vf)
 772		ice_trigger_vf_reset(vf, true, true);
 773
 774	/* HW requires some time to make sure it can flush the FIFO for a VF
 775	 * when it resets it. Now that we've triggered all of the VFs, iterate
 776	 * the table again and wait for each VF to complete.
 777	 */
 778	ice_for_each_vf(pf, bkt, vf) {
 779		if (!vf->vf_ops->poll_reset_status(vf)) {
 780			/* Display a warning if at least one VF didn't manage
 781			 * to reset in time, but continue on with the
 782			 * operation.
 783			 */
 784			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
 785			break;
 786		}
 787	}
 788
 789	/* free VF resources to begin resetting the VSI state */
 790	ice_for_each_vf(pf, bkt, vf) {
 791		mutex_lock(&vf->cfg_lock);
 792
 793		ice_eswitch_detach_vf(pf, vf);
 794		vf->driver_caps = 0;
 795		ice_vc_set_default_allowlist(vf);
 796
 797		ice_vf_fdir_exit(vf);
 798		ice_vf_fdir_init(vf);
 799		/* clean VF control VSI when resetting VFs since it should be
 800		 * setup only when VF creates its first FDIR rule.
 801		 */
 802		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 803			ice_vf_ctrl_invalidate_vsi(vf);
 804
 805		ice_vf_pre_vsi_rebuild(vf);
 806		ice_vf_rebuild_vsi(vf);
 807		ice_vf_post_vsi_rebuild(vf);
 808
 809		ice_eswitch_attach_vf(pf, vf);
 810
 811		mutex_unlock(&vf->cfg_lock);
 812	}
 813
 814	ice_flush(hw);
 815	clear_bit(ICE_VF_DIS, pf->state);
 816
 817	mutex_unlock(&pf->vfs.table_lock);
 818}
 819
 820/**
 821 * ice_notify_vf_reset - Notify VF of a reset event
 822 * @vf: pointer to the VF structure
 823 */
 824static void ice_notify_vf_reset(struct ice_vf *vf)
 825{
 826	struct ice_hw *hw = &vf->pf->hw;
 827	struct virtchnl_pf_event pfe;
 828
  829	/* Bail out if the VF is disabled, or is neither initialized nor active;
  830	 * otherwise proceed with the notification.
 831	 */
 832	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
 833	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
 834	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
 835		return;
 836
 837	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 838	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 839	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
 840			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
 841			      NULL);
 842}
 843
 844/**
 845 * ice_reset_vf - Reset a particular VF
 846 * @vf: pointer to the VF structure
 847 * @flags: flags controlling behavior of the reset
 848 *
 849 * Flags:
 850 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 851 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 852 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 853 *
 854 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 855 * the VF resets successfully. Returns an error code if the VF fails to
 856 * rebuild.
 857 */
 858int ice_reset_vf(struct ice_vf *vf, u32 flags)
 859{
 860	struct ice_pf *pf = vf->pf;
 861	struct ice_lag *lag;
 862	struct ice_vsi *vsi;
 863	u8 act_prt, pri_prt;
 864	struct device *dev;
 865	int err = 0;
 866	bool rsd;
 867
 868	dev = ice_pf_to_dev(pf);
 869	act_prt = ICE_LAG_INVALID_PORT;
 870	pri_prt = pf->hw.port_info->lport;
 871
 872	if (flags & ICE_VF_RESET_NOTIFY)
 873		ice_notify_vf_reset(vf);
 874
 875	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
 876		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
 877			vf->vf_id);
 878		return 0;
 879	}
 880
 881	if (flags & ICE_VF_RESET_LOCK)
 882		mutex_lock(&vf->cfg_lock);
 883	else
 884		lockdep_assert_held(&vf->cfg_lock);
 885
 886	lag = pf->lag;
 887	mutex_lock(&pf->lag_mutex);
 888	if (lag && lag->bonded && lag->primary) {
 889		act_prt = lag->active_port;
 890		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
 891		    lag->upper_netdev)
 892			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
 893		else
 894			act_prt = ICE_LAG_INVALID_PORT;
 895	}
 896
 897	if (ice_is_vf_disabled(vf)) {
 898		vsi = ice_get_vf_vsi(vf);
 899		if (!vsi) {
 900			dev_dbg(dev, "VF is already removed\n");
 901			err = -EINVAL;
 902			goto out_unlock;
 903		}
 904		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
 905
 906		if (ice_vsi_is_rx_queue_active(vsi))
 907			ice_vsi_stop_all_rx_rings(vsi);
 908
 909		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
 910			vf->vf_id);
 911		goto out_unlock;
 912	}
 913
 914	/* Set VF disable bit state here, before triggering reset */
 915	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
 916	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
 917
 918	vsi = ice_get_vf_vsi(vf);
 919	if (WARN_ON(!vsi)) {
 920		err = -EIO;
 921		goto out_unlock;
 922	}
 923
 924	ice_dis_vf_qs(vf);
 925
 926	/* Call Disable LAN Tx queue AQ whether or not queues are
 927	 * enabled. This is needed for successful completion of VFR.
 928	 */
 929	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
 930			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
 931
 932	/* poll VPGEN_VFRSTAT reg to make sure
 933	 * that reset is complete
 934	 */
 935	rsd = vf->vf_ops->poll_reset_status(vf);
 936
 937	/* Display a warning if VF didn't manage to reset in time, but need to
 938	 * continue on with the operation.
 939	 */
 940	if (!rsd)
 941		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
 942
 943	vf->driver_caps = 0;
 944	ice_vc_set_default_allowlist(vf);
 945
 946	/* disable promiscuous modes in case they were enabled
 947	 * ignore any error if disabling process failed
 948	 */
 949	ice_vf_clear_all_promisc_modes(vf, vsi);
 950
 951	ice_vf_fdir_exit(vf);
 952	ice_vf_fdir_init(vf);
 953	/* clean VF control VSI when resetting VF since it should be setup
 954	 * only when VF creates its first FDIR rule.
 955	 */
 956	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 957		ice_vf_ctrl_vsi_release(vf);
 958
 959	ice_vf_pre_vsi_rebuild(vf);
 960
 961	if (ice_vf_reconfig_vsi(vf)) {
 962		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
 963			vf->vf_id);
 964		err = -EFAULT;
 965		goto out_unlock;
 966	}
 967
 968	ice_vf_post_vsi_rebuild(vf);
 969	vsi = ice_get_vf_vsi(vf);
 970	if (WARN_ON(!vsi)) {
 971		err = -EINVAL;
 972		goto out_unlock;
 973	}
 974
 975	ice_eswitch_update_repr(&vf->repr_id, vsi);
 976
 977	/* if the VF has been reset allow it to come up again */
 978	ice_reset_vf_mbx_cnt(vf);
 979
 980out_unlock:
 981	if (lag && lag->bonded && lag->primary &&
 982	    act_prt != ICE_LAG_INVALID_PORT)
 983		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
 984	mutex_unlock(&pf->lag_mutex);
 985
 986	if (flags & ICE_VF_RESET_LOCK)
 987		mutex_unlock(&vf->cfg_lock);
 988
 989	return err;
 990}
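
/* Illustrative invocation (a sketch, not from this file), assuming the
 * caller does not already hold the VF's cfg_lock and wants the VF to be
 * notified before the reset:
 *
 *	err = ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
 */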
 991
 992/**
 993 * ice_set_vf_state_dis - Set VF state to disabled
 994 * @vf: pointer to the VF structure
 995 */
 996void ice_set_vf_state_dis(struct ice_vf *vf)
 997{
 998	ice_set_vf_state_qs_dis(vf);
 999	vf->vf_ops->clear_reset_state(vf);
1000}
1001
1002/* Private functions only accessed from other virtualization files */
1003
1004/**
1005 * ice_initialize_vf_entry - Initialize a VF entry
1006 * @vf: pointer to the VF structure
1007 */
1008void ice_initialize_vf_entry(struct ice_vf *vf)
1009{
1010	struct ice_pf *pf = vf->pf;
1011	struct ice_vfs *vfs;
1012
1013	vfs = &pf->vfs;
1014
1015	/* assign default capabilities */
1016	vf->spoofchk = true;
1017	ice_vc_set_default_allowlist(vf);
1018	ice_virtchnl_set_dflt_ops(vf);
1019
1020	/* set default number of MSI-X */
1021	vf->num_msix = vfs->num_msix_per;
1022	vf->num_vf_qs = vfs->num_qps_per;
1023
1024	/* ctrl_vsi_idx will be set to a valid value only when iAVF
1025	 * creates its first fdir rule.
1026	 */
1027	ice_vf_ctrl_invalidate_vsi(vf);
1028	ice_vf_fdir_init(vf);
1029
1030	/* Initialize mailbox info for this VF */
1031	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
1032		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
1033	else
1034		ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1035
1036	mutex_init(&vf->cfg_lock);
1037}
1038
1039void ice_deinitialize_vf_entry(struct ice_vf *vf)
1040{
1041	struct ice_pf *pf = vf->pf;
1042
1043	if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
1044		list_del(&vf->mbx_info.list_entry);
1045}
1046
1047/**
1048 * ice_dis_vf_qs - Disable the VF queues
1049 * @vf: pointer to the VF structure
1050 */
1051void ice_dis_vf_qs(struct ice_vf *vf)
1052{
1053	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1054
1055	if (WARN_ON(!vsi))
1056		return;
1057
1058	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1059	ice_vsi_stop_all_rx_rings(vsi);
1060	ice_set_vf_state_qs_dis(vf);
1061}
1062
1063/**
1064 * ice_err_to_virt_err - translate errors for VF return code
1065 * @err: error return code
1066 */
1067enum virtchnl_status_code ice_err_to_virt_err(int err)
1068{
1069	switch (err) {
1070	case 0:
1071		return VIRTCHNL_STATUS_SUCCESS;
1072	case -EINVAL:
1073	case -ENODEV:
1074		return VIRTCHNL_STATUS_ERR_PARAM;
1075	case -ENOMEM:
1076		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1077	case -EALREADY:
1078	case -EBUSY:
1079	case -EIO:
1080	case -ENOSPC:
1081		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1082	default:
1083		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1084	}
1085}
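
/* Sketch of a typical translation site in a virtchnl reply path
 * (illustrative; the reply helper itself lives outside this file):
 *
 *	enum virtchnl_status_code v_ret = ice_err_to_virt_err(err);
 *
 *	... v_ret is then returned to the VF as the operation status ...
 */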
1086
1087/**
1088 * ice_check_vf_init - helper to check if VF init complete
1089 * @vf: the pointer to the VF to check
1090 */
1091int ice_check_vf_init(struct ice_vf *vf)
1092{
1093	struct ice_pf *pf = vf->pf;
1094
1095	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1096		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1097			vf->vf_id);
1098		return -EBUSY;
1099	}
1100	return 0;
1101}
1102
1103/**
1104 * ice_vf_get_port_info - Get the VF's port info structure
1105 * @vf: VF used to get the port info structure for
1106 */
1107struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1108{
1109	return vf->pf->hw.port_info;
1110}
1111
1112/**
1113 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1114 * @vsi: the VSI to configure
1115 * @enable: whether to enable or disable the spoof checking
1116 *
1117 * Configure a VSI to enable (or disable) spoof checking behavior.
1118 */
1119static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1120{
1121	struct ice_vsi_ctx *ctx;
1122	int err;
1123
1124	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1125	if (!ctx)
1126		return -ENOMEM;
1127
1128	ctx->info.sec_flags = vsi->info.sec_flags;
1129	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1130
1131	if (enable)
1132		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1133	else
1134		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1135
1136	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1137	if (err)
1138		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1139			enable ? "ON" : "OFF", vsi->vsi_num, err);
1140	else
1141		vsi->info.sec_flags = ctx->info.sec_flags;
1142
1143	kfree(ctx);
1144
1145	return err;
1146}
1147
1148/**
1149 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1150 * @vsi: VSI to enable Tx spoof checking for
1151 */
1152static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1153{
1154	struct ice_vsi_vlan_ops *vlan_ops;
1155	int err = 0;
1156
1157	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1158
1159	/* Allow VF with VLAN 0 only to send all tagged traffic */
1160	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1161		err = vlan_ops->ena_tx_filtering(vsi);
1162		if (err)
1163			return err;
1164	}
1165
1166	return ice_cfg_mac_antispoof(vsi, true);
1167}
1168
1169/**
1170 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1171 * @vsi: VSI to disable Tx spoof checking for
1172 */
1173static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1174{
1175	struct ice_vsi_vlan_ops *vlan_ops;
1176	int err;
1177
1178	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1179
1180	err = vlan_ops->dis_tx_filtering(vsi);
1181	if (err)
1182		return err;
1183
1184	return ice_cfg_mac_antispoof(vsi, false);
1185}
1186
1187/**
1188 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1189 * @vsi: VSI associated to the VF
1190 * @enable: whether to enable or disable the spoof checking
1191 */
1192int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1193{
1194	int err;
1195
1196	if (enable)
1197		err = ice_vsi_ena_spoofchk(vsi);
1198	else
1199		err = ice_vsi_dis_spoofchk(vsi);
1200
1201	return err;
1202}
1203
1204/**
 1205 * ice_is_vf_trusted - check if the VF has the trusted/privileged capability
1206 * @vf: pointer to the VF info
1207 */
1208bool ice_is_vf_trusted(struct ice_vf *vf)
1209{
1210	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1211}
1212
1213/**
1214 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1215 * @vf: the VF to check
1216 *
1217 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1218 * otherwise
1219 */
1220bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1221{
1222	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1223		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1224}
1225
1226/**
1227 * ice_is_vf_link_up - check if the VF's link is up
1228 * @vf: VF to check if link is up
1229 */
1230bool ice_is_vf_link_up(struct ice_vf *vf)
1231{
1232	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1233
1234	if (ice_check_vf_init(vf))
1235		return false;
1236
1237	if (ice_vf_has_no_qs_ena(vf))
1238		return false;
1239	else if (vf->link_forced)
1240		return vf->link_up;
1241	else
1242		return pi->phy.link_info.link_info &
1243			ICE_AQ_LINK_UP;
1244}
1245
1246/**
1247 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1248 * @vf: VF that control VSI is being invalidated on
1249 */
1250void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
1251{
1252	vf->ctrl_vsi_idx = ICE_NO_VSI;
1253}
1254
1255/**
1256 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1257 * @vf: VF that control VSI is being released on
1258 */
1259void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1260{
1261	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1262	ice_vf_ctrl_invalidate_vsi(vf);
1263}
1264
1265/**
1266 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1267 * @vf: VF to setup control VSI for
1268 *
1269 * Returns pointer to the successfully allocated VSI struct on success,
1270 * otherwise returns NULL on failure.
1271 */
1272struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1273{
1274	struct ice_vsi_cfg_params params = {};
1275	struct ice_pf *pf = vf->pf;
1276	struct ice_vsi *vsi;
1277
1278	params.type = ICE_VSI_CTRL;
1279	params.port_info = ice_vf_get_port_info(vf);
1280	params.vf = vf;
1281	params.flags = ICE_VSI_FLAG_INIT;
1282
1283	vsi = ice_vsi_setup(pf, &params);
1284	if (!vsi) {
1285		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1286		ice_vf_ctrl_invalidate_vsi(vf);
1287	}
1288
1289	return vsi;
1290}
1291
1292/**
1293 * ice_vf_init_host_cfg - Initialize host admin configuration
1294 * @vf: VF to initialize
1295 * @vsi: the VSI created at initialization
1296 *
1297 * Initialize the VF host configuration. Called during VF creation to setup
1298 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1299 * should only be called during VF creation.
1300 */
1301int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1302{
1303	struct ice_vsi_vlan_ops *vlan_ops;
1304	struct ice_pf *pf = vf->pf;
1305	u8 broadcast[ETH_ALEN];
1306	struct device *dev;
1307	int err;
1308
1309	dev = ice_pf_to_dev(pf);
1310
1311	err = ice_vsi_add_vlan_zero(vsi);
1312	if (err) {
1313		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1314			 vf->vf_id);
1315		return err;
1316	}
1317
1318	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1319	err = vlan_ops->ena_rx_filtering(vsi);
1320	if (err) {
1321		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1322			 vf->vf_id);
1323		return err;
1324	}
1325
1326	eth_broadcast_addr(broadcast);
1327	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1328	if (err) {
1329		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1330			vf->vf_id, err);
1331		return err;
1332	}
1333
1334	vf->num_mac = 1;
1335
1336	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1337	if (err) {
1338		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1339			 vf->vf_id);
1340		return err;
1341	}
1342
1343	return 0;
1344}
1345
1346/**
1347 * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
1348 * @vf: VF to remove access to VSI for
1349 */
1350void ice_vf_invalidate_vsi(struct ice_vf *vf)
1351{
1352	vf->lan_vsi_idx = ICE_NO_VSI;
1353}
1354
1355/**
1356 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1357 * @vf: pointer to the VF structure
1358 *
 1359 * Release the VSI associated with this VF and then invalidate the VF's
 1360 * VSI index.
1361 */
1362void ice_vf_vsi_release(struct ice_vf *vf)
1363{
1364	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1365
1366	if (WARN_ON(!vsi))
1367		return;
1368
1369	ice_vsi_release(vsi);
1370	ice_vf_invalidate_vsi(vf);
1371}
1372
1373/**
1374 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1375 * @pf: the PF private structure
1376 * @vsi: pointer to the VSI
1377 *
 1378 * Return the first VF control VSI found other than the VSI passed as a
 1379 * parameter. This function is used to determine whether new resources
 1380 * have to be allocated for a control VSI or whether they can be shared
 1381 * with an existing one.
 1382 *
 1383 * Return a pointer to the first matching VF control VSI other than the
 1384 * given VSI, or NULL if none is found.
1385 *
1386 */
1387struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1388{
1389	struct ice_vsi *ctrl_vsi = NULL;
1390	struct ice_vf *vf;
1391	unsigned int bkt;
1392
1393	rcu_read_lock();
1394	ice_for_each_vf_rcu(pf, bkt, vf) {
1395		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1396			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1397			break;
1398		}
1399	}
1400
1401	rcu_read_unlock();
1402	return ctrl_vsi;
1403}
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2022, Intel Corporation. */
   3
   4#include "ice_vf_lib_private.h"
   5#include "ice.h"
   6#include "ice_lib.h"
   7#include "ice_fltr.h"
   8#include "ice_virtchnl_allowlist.h"
   9
  10/* Public functions which may be accessed by all driver files */
  11
  12/**
  13 * ice_get_vf_by_id - Get pointer to VF by ID
  14 * @pf: the PF private structure
  15 * @vf_id: the VF ID to locate
  16 *
  17 * Locate and return a pointer to the VF structure associated with a given ID.
  18 * Returns NULL if the ID does not have a valid VF structure associated with
  19 * it.
  20 *
  21 * This function takes a reference to the VF, which must be released by
  22 * calling ice_put_vf() once the caller is finished accessing the VF structure
  23 * returned.
  24 */
  25struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
  26{
  27	struct ice_vf *vf;
  28
  29	rcu_read_lock();
  30	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
  31		if (vf->vf_id == vf_id) {
  32			struct ice_vf *found;
  33
  34			if (kref_get_unless_zero(&vf->refcnt))
  35				found = vf;
  36			else
  37				found = NULL;
  38
  39			rcu_read_unlock();
  40			return found;
  41		}
  42	}
  43	rcu_read_unlock();
  44
  45	return NULL;
  46}
  47
  48/**
  49 * ice_release_vf - Release VF associated with a refcount
  50 * @ref: the kref decremented to zero
  51 *
  52 * Callback function for kref_put to release a VF once its reference count has
  53 * hit zero.
  54 */
  55static void ice_release_vf(struct kref *ref)
  56{
  57	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
  58
  59	pci_dev_put(vf->vfdev);
  60
  61	vf->vf_ops->free(vf);
  62}
  63
  64/**
  65 * ice_put_vf - Release a reference to a VF
  66 * @vf: the VF structure to decrease reference count on
  67 *
  68 * Decrease the reference count for a VF, and free the entry if it is no
  69 * longer in use.
  70 *
  71 * This must be called after ice_get_vf_by_id() once the reference to the VF
  72 * structure is no longer used. Otherwise, the VF structure will never be
  73 * freed.
  74 */
  75void ice_put_vf(struct ice_vf *vf)
  76{
  77	kref_put(&vf->refcnt, ice_release_vf);
  78}
  79
  80/**
  81 * ice_has_vfs - Return true if the PF has any associated VFs
  82 * @pf: the PF private structure
  83 *
  84 * Return whether or not the PF has any allocated VFs.
  85 *
  86 * Note that this function only guarantees that there are no VFs at the point
  87 * of calling it. It does not guarantee that no more VFs will be added.
  88 */
  89bool ice_has_vfs(struct ice_pf *pf)
  90{
  91	/* A simple check that the hash table is not empty does not require
  92	 * the mutex or rcu_read_lock.
  93	 */
  94	return !hash_empty(pf->vfs.table);
  95}
  96
  97/**
  98 * ice_get_num_vfs - Get number of allocated VFs
  99 * @pf: the PF private structure
 100 *
 101 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
 102 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
 103 * the output of this function.
 104 */
 105u16 ice_get_num_vfs(struct ice_pf *pf)
 106{
 107	struct ice_vf *vf;
 108	unsigned int bkt;
 109	u16 num_vfs = 0;
 110
 111	rcu_read_lock();
 112	ice_for_each_vf_rcu(pf, bkt, vf)
 113		num_vfs++;
 114	rcu_read_unlock();
 115
 116	return num_vfs;
 117}
 118
 119/**
 120 * ice_get_vf_vsi - get VF's VSI based on the stored index
 121 * @vf: VF used to get VSI
 122 */
 123struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
 124{
 125	if (vf->lan_vsi_idx == ICE_NO_VSI)
 126		return NULL;
 127
 128	return vf->pf->vsi[vf->lan_vsi_idx];
 129}
 130
 131/**
 132 * ice_is_vf_disabled
 133 * @vf: pointer to the VF info
 134 *
 135 * If the PF has been disabled, there is no need resetting VF until PF is
 136 * active again. Similarly, if the VF has been disabled, this means something
 137 * else is resetting the VF, so we shouldn't continue.
 138 *
 139 * Returns true if the caller should consider the VF as disabled whether
 140 * because that single VF is explicitly disabled or because the PF is
 141 * currently disabled.
 142 */
 143bool ice_is_vf_disabled(struct ice_vf *vf)
 144{
 145	struct ice_pf *pf = vf->pf;
 146
 147	return (test_bit(ICE_VF_DIS, pf->state) ||
 148		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
 149}
 150
 151/**
 152 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 153 * @vf: The VF being resseting
 154 *
 155 * The max poll time is about ~800ms, which is about the maximum time it takes
 156 * for a VF to be reset and/or a VF driver to be removed.
 157 */
 158static void ice_wait_on_vf_reset(struct ice_vf *vf)
 159{
 160	int i;
 161
 162	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
 163		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
 164			break;
 165		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
 166	}
 167}
 168
 169/**
 170 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 171 * @vf: VF to check if it's ready to be configured/queried
 172 *
 173 * The purpose of this function is to make sure the VF is not in reset, not
 174 * disabled, and initialized so it can be configured and/or queried by a host
 175 * administrator.
 176 */
 177int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
 178{
 179	ice_wait_on_vf_reset(vf);
 180
 181	if (ice_is_vf_disabled(vf))
 182		return -EINVAL;
 183
 184	if (ice_check_vf_init(vf))
 185		return -EBUSY;
 186
 187	return 0;
 188}
 189
 190/**
 191 * ice_trigger_vf_reset - Reset a VF on HW
 192 * @vf: pointer to the VF structure
 193 * @is_vflr: true if VFLR was issued, false if not
 194 * @is_pfr: true if the reset was triggered due to a previous PFR
 195 *
 196 * Trigger hardware to start a reset for a particular VF. Expects the caller
 197 * to wait the proper amount of time to allow hardware to reset the VF before
 198 * it cleans up and restores VF functionality.
 199 */
 200static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 201{
 202	/* Inform VF that it is no longer active, as a warning */
 203	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 204
 205	/* Disable VF's configuration API during reset. The flag is re-enabled
 206	 * when it's safe again to access VF's VSI.
 207	 */
 208	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 209
 210	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
 211	 * needs to clear them in the case of VFR/VFLR. If this is done for
 212	 * PFR, it can mess up VF resets because the VF driver may already
 213	 * have started cleanup by the time we get here.
 214	 */
 215	if (!is_pfr)
 216		vf->vf_ops->clear_mbx_register(vf);
 217
 218	vf->vf_ops->trigger_reset_register(vf, is_vflr);
 219}
 220
 221static void ice_vf_clear_counters(struct ice_vf *vf)
 222{
 223	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 224
 225	if (vsi)
 226		vsi->num_vlan = 0;
 227
 228	vf->num_mac = 0;
 229	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
 230	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
 231}
 232
 233/**
 234 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 235 * @vf: VF to perform pre VSI rebuild tasks
 236 *
 237 * These tasks are items that don't need to be amortized since they are most
 238 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 239 */
 240static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
 241{
 242	/* Close any IRQ mapping now */
 243	if (vf->vf_ops->irq_close)
 244		vf->vf_ops->irq_close(vf);
 245
 246	ice_vf_clear_counters(vf);
 247	vf->vf_ops->clear_reset_trigger(vf);
 248}
 249
 250/**
 251 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
 252 * @vf: VF to reconfigure the VSI for
 253 *
 254 * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
 255 * configuration change, etc).
 256 *
 257 * It brings the VSI down and then reconfigures it with the hardware.
 258 */
 259int ice_vf_reconfig_vsi(struct ice_vf *vf)
 260{
 261	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 262	struct ice_vsi_cfg_params params = {};
 263	struct ice_pf *pf = vf->pf;
 264	int err;
 265
 266	if (WARN_ON(!vsi))
 267		return -EINVAL;
 268
 269	params = ice_vsi_to_params(vsi);
 270	params.flags = ICE_VSI_FLAG_NO_INIT;
 271
 272	ice_vsi_decfg(vsi);
 273	ice_fltr_remove_all(vsi);
 274
 275	err = ice_vsi_cfg(vsi, &params);
 276	if (err) {
 277		dev_err(ice_pf_to_dev(pf),
 278			"Failed to reconfigure the VF%u's VSI, error %d\n",
 279			vf->vf_id, err);
 280		return err;
 281	}
 282
 283	/* Update the lan_vsi_num field since it might have been changed. The
 284	 * PF lan_vsi_idx number remains the same so we don't need to change
 285	 * that.
 286	 */
 287	vf->lan_vsi_num = vsi->vsi_num;
 288
 289	return 0;
 290}
 291
 292/**
 293 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 294 * @vf: VF to rebuild the VSI for
 295 *
 296 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 297 * host, PFR, CORER, etc.).
 298 *
 299 * It reprograms the VSI configuration back into hardware.
 300 */
 301static int ice_vf_rebuild_vsi(struct ice_vf *vf)
 302{
 303	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 304	struct ice_pf *pf = vf->pf;
 305
 306	if (WARN_ON(!vsi))
 307		return -EINVAL;
 308
 309	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
 310		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
 311			vf->vf_id);
 312		return -EIO;
 313	}
 314	/* vsi->idx will remain the same in this case so don't update
 315	 * vf->lan_vsi_idx
 316	 */
 317	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
 318	vf->lan_vsi_num = vsi->vsi_num;
 319
 320	return 0;
 321}
 322
 323/**
 324 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 325 * @vf: VF to add MAC filters for
 326 * @vsi: Pointer to VSI
 327 *
 328 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 329 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 330 */
 331static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
 332{
 333	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 334	struct device *dev = ice_pf_to_dev(vf->pf);
 335	int err;
 336
 337	if (ice_vf_is_port_vlan_ena(vf)) {
 338		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
 339		if (err) {
 340			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
 341				vf->vf_id, err);
 342			return err;
 343		}
 344
 345		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
 346	} else {
 
 
 
 
 
 
 
 347		err = ice_vsi_add_vlan_zero(vsi);
 348	}
 349
 350	if (err) {
 351		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
 352			ice_vf_is_port_vlan_ena(vf) ?
 353			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
 354		return err;
 355	}
 356
 357	err = vlan_ops->ena_rx_filtering(vsi);
 358	if (err)
 359		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
 360			 vf->vf_id, vsi->idx, err);
 361
 362	return 0;
 363}
 364
 365/**
 366 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
 367 * @vf: VF to re-apply the configuration for
 368 *
 369 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
 370 * needs to re-apply the host configured Tx rate limiting configuration.
 371 */
 372static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
 373{
 374	struct device *dev = ice_pf_to_dev(vf->pf);
 375	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 376	int err;
 377
 378	if (WARN_ON(!vsi))
 379		return -EINVAL;
 380
 381	if (vf->min_tx_rate) {
 382		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
 383		if (err) {
 384			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
 385				vf->min_tx_rate, vf->vf_id, err);
 386			return err;
 387		}
 388	}
 389
 390	if (vf->max_tx_rate) {
 391		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
 392		if (err) {
 393			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
 394				vf->max_tx_rate, vf->vf_id, err);
 395			return err;
 396		}
 397	}
 398
 399	return 0;
 400}
 401
 402/**
 403 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 404 * @vf: VF to configure trust setting for
 405 */
 406static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
 407{
 408	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
 409}
 410
 411/**
 412 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 413 * @vf: VF to add MAC filters for
 414 *
 415 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 416 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 417 */
 418static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
 419{
 420	struct device *dev = ice_pf_to_dev(vf->pf);
 421	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 422	u8 broadcast[ETH_ALEN];
 423	int status;
 424
 425	if (WARN_ON(!vsi))
 426		return -EINVAL;
 427
 428	if (ice_is_eswitch_mode_switchdev(vf->pf))
 429		return 0;
 430
 431	eth_broadcast_addr(broadcast);
 432	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
 433	if (status) {
 434		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
 435			vf->vf_id, status);
 436		return status;
 437	}
 438
 439	vf->num_mac++;
 440
 441	if (is_valid_ether_addr(vf->hw_lan_addr)) {
 442		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
 443					  ICE_FWD_TO_VSI);
 444		if (status) {
 445			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
 446				&vf->hw_lan_addr[0], vf->vf_id,
 447				status);
 448			return status;
 449		}
 450		vf->num_mac++;
 451
 452		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
 453	}
 454
 455	return 0;
 456}
 457
 458/**
 459 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
 460 * @vsi: Pointer to VSI
 461 *
 462 * This function moves VSI into corresponding scheduler aggregator node
 463 * based on cached value of "aggregator node info" per VSI
 464 */
 465static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
 466{
 467	struct ice_pf *pf = vsi->back;
 468	struct device *dev;
 469	int status;
 470
 471	if (!vsi->agg_node)
 472		return;
 473
 474	dev = ice_pf_to_dev(pf);
 475	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
 476		dev_dbg(dev,
 477			"agg_id %u already has reached max_num_vsis %u\n",
 478			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
 479		return;
 480	}
 481
 482	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
 483				     vsi->idx, vsi->tc_cfg.ena_tc);
 484	if (status)
 485		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
 486			vsi->idx, vsi->agg_node->agg_id);
 487	else
 488		vsi->agg_node->num_vsis++;
 489}
 490
 491/**
 492 * ice_vf_rebuild_host_cfg - rebuild host admin configuration that persists across reset
 493 * @vf: VF to rebuild host configuration on
 494 */
 495static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
 496{
 497	struct device *dev = ice_pf_to_dev(vf->pf);
 498	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
 499
 500	if (WARN_ON(!vsi))
 501		return;
 502
 503	ice_vf_set_host_trust_cfg(vf);
 504
 505	if (ice_vf_rebuild_host_mac_cfg(vf))
 506		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
 507			vf->vf_id);
 508
 509	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
 510		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
 511			vf->vf_id);
 512
 513	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
 514		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
 515			vf->vf_id);
 516
 517	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
 518		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
 519			vf->vf_id);
 520
 521	/* rebuild aggregator node config for main VF VSI */
 522	ice_vf_rebuild_aggregator_node_cfg(vsi);
 523}
 524
 525/**
 526 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 527 * @vf: pointer to the VF structure
 528 */
 529static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
 530{
 531	/* Clear Rx/Tx enabled queues flag */
 532	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
 533	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
 534	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
 535}
 536
 537/**
 538 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 539 * @vf: VF to set in initialized state
 540 *
 541 * After this function the VF will be ready to receive/handle the
 542 * VIRTCHNL_OP_GET_VF_RESOURCES message
 543 */
 544static void ice_vf_set_initialized(struct ice_vf *vf)
 545{
 546	ice_set_vf_state_qs_dis(vf);
 547	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 548	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 549	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
 550	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
 551	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
 552}
 553
 554/**
 555 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
 556 * @vf: the VF being reset
 557 *
 558 * Perform reset tasks which must occur after the VSI has been re-created or
 559 * rebuilt during a VF reset.
 560 */
 561static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
 562{
 563	ice_vf_rebuild_host_cfg(vf);
 564	ice_vf_set_initialized(vf);
 565
 566	vf->vf_ops->post_vsi_rebuild(vf);
 567}
 568
 569/**
 570 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
 571 * are in unicast promiscuous mode
 572 * @pf: PF structure for accessing VF(s)
 573 *
 574 * Return false if no VF(s) are in unicast promiscuous mode,
 575 * else return true
 576 */
 577bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
 578{
 579	bool is_vf_promisc = false;
 580	struct ice_vf *vf;
 581	unsigned int bkt;
 582
 583	rcu_read_lock();
 584	ice_for_each_vf_rcu(pf, bkt, vf) {
 585		/* found a VF that has promiscuous mode configured */
 586		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
 587			is_vf_promisc = true;
 588			break;
 589		}
 590	}
 591	rcu_read_unlock();
 592
 593	return is_vf_promisc;
 594}
 595
 596/**
 597 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
 598 * @vf: the VF pointer
 599 * @vsi: the VSI to configure
 600 * @ucast_m: promiscuous mask to apply to unicast
 601 * @mcast_m: promiscuous mask to apply to multicast
 602 *
 603 * Decide which mask should be used for the unicast and multicast filters,
 604 * based on the presence of VLANs
 605 */
 606void
 607ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
 608			 u8 *ucast_m, u8 *mcast_m)
 609{
 610	if (ice_vf_is_port_vlan_ena(vf) ||
 611	    ice_vsi_has_non_zero_vlans(vsi)) {
 612		*mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
 613		*ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
 614	} else {
 615		*mcast_m = ICE_MCAST_PROMISC_BITS;
 616		*ucast_m = ICE_UCAST_PROMISC_BITS;
 617	}
 618}
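
/* Illustrative sketch, not part of the driver: callers are expected to derive
 * both masks first and then apply or clear them separately, mirroring
 * ice_vf_clear_all_promisc_modes() below:
 *
 *	u8 ucast_m, mcast_m;
 *	int err;
 *
 *	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
 *	err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
 *	if (!err)
 *		err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
 */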
 619
 620/**
 621 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
 622 * @vf: the VF pointer
 623 * @vsi: the VSI to configure
 624 *
 625 * Clear all promiscuous/allmulticast filters for a VF
 626 */
 627static int
 628ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
 629{
 630	struct ice_pf *pf = vf->pf;
 631	u8 ucast_m, mcast_m;
 632	int ret = 0;
 633
 634	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
 635	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
 636		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
 637			if (ice_is_dflt_vsi_in_use(vsi->port_info))
 638				ret = ice_clear_dflt_vsi(vsi);
 639		} else {
 640			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
 641		}
 642
 643		if (ret) {
 644			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
 645		} else {
 646			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 647			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
 648		}
 649	}
 650
 651	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
 652		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
 653		if (ret) {
 654			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
 655		} else {
 656			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 657			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
 658		}
 659	}
 660	return ret;
 661}
 662
 663/**
 664 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
 665 * @vf: the VF to configure
 666 * @vsi: the VF's VSI
 667 * @promisc_m: the promiscuous mode to enable
 668 */
 669int
 670ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
 671{
 672	struct ice_hw *hw = &vsi->back->hw;
 673	int status;
 674
 675	if (ice_vf_is_port_vlan_ena(vf))
 676		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
 677						  ice_vf_get_port_vlan_id(vf));
 678	else if (ice_vsi_has_non_zero_vlans(vsi))
 679		status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
 680	else
 681		status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
 682
 683	if (status && status != -EEXIST) {
 684		dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
 685			vf->vf_id, status);
 686		return status;
 687	}
 688
 689	return 0;
 690}
 691
 692/**
 693 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
 694 * @vf: the VF to configure
 695 * @vsi: the VF's VSI
 696 * @promisc_m: the promiscuous mode to disable
 697 */
 698int
 699ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
 700{
 701	struct ice_hw *hw = &vsi->back->hw;
 702	int status;
 703
 704	if (ice_vf_is_port_vlan_ena(vf))
 705		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 706						    ice_vf_get_port_vlan_id(vf));
 707	else if (ice_vsi_has_non_zero_vlans(vsi))
 708		status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
 709	else
 710		status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
 711
 712	if (status && status != -ENOENT) {
 713		dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
 714			vf->vf_id, status);
 715		return status;
 716	}
 717
 718	return 0;
 719}
 720
 721/**
 722 * ice_reset_all_vfs - reset all allocated VFs in one go
 723 * @pf: pointer to the PF structure
 724 *
 725 * Reset all VFs at once, in response to a PF or other device reset.
 726 *
 727 * First, tell the hardware to reset each VF, then do all the waiting in one
 728 * chunk, and finally finish restoring each VF after the wait. This is useful
 729 * during PF routines which need to reset all VFs, which would otherwise
 730 * have to perform these resets in a serialized fashion.
 731 */
 732void ice_reset_all_vfs(struct ice_pf *pf)
 733{
 734	struct device *dev = ice_pf_to_dev(pf);
 735	struct ice_hw *hw = &pf->hw;
 736	struct ice_vf *vf;
 737	unsigned int bkt;
 738
 739	/* If we don't have any VFs, then there is nothing to reset */
 740	if (!ice_has_vfs(pf))
 741		return;
 742
 743	mutex_lock(&pf->vfs.table_lock);
 744
 745	/* clear all malicious info if the VFs are getting reset */
 746	ice_for_each_vf(pf, bkt, vf)
 747		ice_mbx_clear_malvf(&vf->mbx_info);
 748
 749	/* If VFs have been disabled, there is no need to reset */
 750	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
 751		mutex_unlock(&pf->vfs.table_lock);
 752		return;
 753	}
 754
 755	/* Begin reset on all VFs at once */
 756	ice_for_each_vf(pf, bkt, vf)
 757		ice_trigger_vf_reset(vf, true, true);
 758
 759	/* HW requires some time to make sure it can flush the FIFO for a VF
 760	 * when it resets it. Now that we've triggered all of the VFs, iterate
 761	 * the table again and wait for each VF to complete.
 762	 */
 763	ice_for_each_vf(pf, bkt, vf) {
 764		if (!vf->vf_ops->poll_reset_status(vf)) {
 765			/* Display a warning if at least one VF didn't manage
 766			 * to reset in time, but continue on with the
 767			 * operation.
 768			 */
 769			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
 770			break;
 771		}
 772	}
 773
 774	/* free VF resources to begin resetting the VSI state */
 775	ice_for_each_vf(pf, bkt, vf) {
 776		mutex_lock(&vf->cfg_lock);
 777
 778		ice_eswitch_detach(pf, vf);
 779		vf->driver_caps = 0;
 780		ice_vc_set_default_allowlist(vf);
 781
 782		ice_vf_fdir_exit(vf);
 783		ice_vf_fdir_init(vf);
 784		/* clean VF control VSI when resetting VFs since it should be
 785		 * setup only when VF creates its first FDIR rule.
 786		 */
 787		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 788			ice_vf_ctrl_invalidate_vsi(vf);
 789
 790		ice_vf_pre_vsi_rebuild(vf);
 791		ice_vf_rebuild_vsi(vf);
 792		ice_vf_post_vsi_rebuild(vf);
 793
 794		ice_eswitch_attach(pf, vf);
 795
 796		mutex_unlock(&vf->cfg_lock);
 797	}
 798
 799	ice_flush(hw);
 800	clear_bit(ICE_VF_DIS, pf->state);
 801
 802	mutex_unlock(&pf->vfs.table_lock);
 803}
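
/* Illustrative sketch, not part of the driver: a PF-level reset or rebuild
 * path is assumed to restore its own resources first and then fan the reset
 * out to every VF in one pass; the function bails out early when no VFs are
 * allocated, so the call is safe either way:
 *
 *	ice_reset_all_vfs(pf);
 */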
 804
 805/**
 806 * ice_notify_vf_reset - Notify VF of a reset event
 807 * @vf: pointer to the VF structure
 808 */
 809static void ice_notify_vf_reset(struct ice_vf *vf)
 810{
 811	struct ice_hw *hw = &vf->pf->hw;
 812	struct virtchnl_pf_event pfe;
 813
 814	/* Bail out if the VF is disabled, or is neither in the initialized nor
 815	 * the active state; otherwise proceed with the notification
 816	 */
 817	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
 818	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
 819	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
 820		return;
 821
 822	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 823	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 824	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
 825			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
 826			      NULL);
 827}
 828
 829/**
 830 * ice_reset_vf - Reset a particular VF
 831 * @vf: pointer to the VF structure
 832 * @flags: flags controlling behavior of the reset
 833 *
 834 * Flags:
 835 *   ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
 836 *   ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
 837 *   ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
 838 *
 839 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
 840 * the VF resets successfully. Returns an error code if the VF fails to
 841 * rebuild.
 842 */
 843int ice_reset_vf(struct ice_vf *vf, u32 flags)
 844{
 845	struct ice_pf *pf = vf->pf;
 846	struct ice_lag *lag;
 847	struct ice_vsi *vsi;
 848	u8 act_prt, pri_prt;
 849	struct device *dev;
 850	int err = 0;
 851	bool rsd;
 852
 853	dev = ice_pf_to_dev(pf);
 854	act_prt = ICE_LAG_INVALID_PORT;
 855	pri_prt = pf->hw.port_info->lport;
 856
 857	if (flags & ICE_VF_RESET_NOTIFY)
 858		ice_notify_vf_reset(vf);
 859
 860	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
 861		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
 862			vf->vf_id);
 863		return 0;
 864	}
 865
 866	lag = pf->lag;
 867	mutex_lock(&pf->lag_mutex);
 868	if (lag && lag->bonded && lag->primary) {
 869		act_prt = lag->active_port;
 870		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
 871		    lag->upper_netdev)
 872			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
 873		else
 874			act_prt = ICE_LAG_INVALID_PORT;
 875	}
 876
 877	if (flags & ICE_VF_RESET_LOCK)
 878		mutex_lock(&vf->cfg_lock);
 879	else
 880		lockdep_assert_held(&vf->cfg_lock);
 881
 882	if (ice_is_vf_disabled(vf)) {
 883		vsi = ice_get_vf_vsi(vf);
 884		if (!vsi) {
 885			dev_dbg(dev, "VF is already removed\n");
 886			err = -EINVAL;
 887			goto out_unlock;
 888		}
 889		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
 890
 891		if (ice_vsi_is_rx_queue_active(vsi))
 892			ice_vsi_stop_all_rx_rings(vsi);
 893
 894		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
 895			vf->vf_id);
 896		goto out_unlock;
 897	}
 898
 899	/* Set VF disable bit state here, before triggering reset */
 900	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
 901	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
 902
 903	vsi = ice_get_vf_vsi(vf);
 904	if (WARN_ON(!vsi)) {
 905		err = -EIO;
 906		goto out_unlock;
 907	}
 908
 909	ice_dis_vf_qs(vf);
 910
 911	/* Call Disable LAN Tx queue AQ whether or not queues are
 912	 * enabled. This is needed for successful completion of VFR.
 913	 */
 914	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
 915			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
 916
 917	/* poll VPGEN_VFRSTAT reg to make sure
 918	 * that reset is complete
 919	 */
 920	rsd = vf->vf_ops->poll_reset_status(vf);
 921
 922	/* Display a warning if the VF didn't manage to reset in time, but
 923	 * continue on with the operation anyway.
 924	 */
 925	if (!rsd)
 926		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
 927
 928	vf->driver_caps = 0;
 929	ice_vc_set_default_allowlist(vf);
 930
 931	/* disable promiscuous modes in case they were enabled
 932	 * ignore any error if disabling process failed
 933	 */
 934	ice_vf_clear_all_promisc_modes(vf, vsi);
 935
 936	ice_vf_fdir_exit(vf);
 937	ice_vf_fdir_init(vf);
 938	/* clean VF control VSI when resetting VF since it should be setup
 939	 * only when VF creates its first FDIR rule.
 940	 */
 941	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
 942		ice_vf_ctrl_vsi_release(vf);
 943
 944	ice_vf_pre_vsi_rebuild(vf);
 945
 946	if (ice_vf_reconfig_vsi(vf)) {
 947		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
 948			vf->vf_id);
 949		err = -EFAULT;
 950		goto out_unlock;
 951	}
 952
 953	ice_vf_post_vsi_rebuild(vf);
 954	vsi = ice_get_vf_vsi(vf);
 955	if (WARN_ON(!vsi)) {
 956		err = -EINVAL;
 957		goto out_unlock;
 958	}
 959
 960	ice_eswitch_update_repr(vf->repr_id, vsi);
 961
 962	/* if the VF has been reset allow it to come up again */
 963	ice_mbx_clear_malvf(&vf->mbx_info);
 964
 965out_unlock:
 966	if (flags & ICE_VF_RESET_LOCK)
 967		mutex_unlock(&vf->cfg_lock);
 968
 969	if (lag && lag->bonded && lag->primary &&
 970	    act_prt != ICE_LAG_INVALID_PORT)
 971		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
 972	mutex_unlock(&pf->lag_mutex);
 973
 974	return err;
 975}
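
/* Illustrative sketch, not part of the driver: a caller that does not already
 * hold the VF's cfg_lock would typically request both the notification and
 * the lock via the flags documented above:
 *
 *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
 *
 *	if (vf) {
 *		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
 *		ice_put_vf(vf);
 *	}
 */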
 976
 977/**
 978 * ice_set_vf_state_dis - Set VF state to disabled
 979 * @vf: pointer to the VF structure
 980 */
 981void ice_set_vf_state_dis(struct ice_vf *vf)
 982{
 983	ice_set_vf_state_qs_dis(vf);
 984	vf->vf_ops->clear_reset_state(vf);
 985}
 986
 987/* Private functions only accessed from other virtualization files */
 988
 989/**
 990 * ice_initialize_vf_entry - Initialize a VF entry
 991 * @vf: pointer to the VF structure
 992 */
 993void ice_initialize_vf_entry(struct ice_vf *vf)
 994{
 995	struct ice_pf *pf = vf->pf;
 996	struct ice_vfs *vfs;
 997
 998	vfs = &pf->vfs;
 999
1000	/* assign default capabilities */
1001	vf->spoofchk = true;
1002	vf->num_vf_qs = vfs->num_qps_per;
1003	ice_vc_set_default_allowlist(vf);
1004	ice_virtchnl_set_dflt_ops(vf);
1005
1006	/* ctrl_vsi_idx will be set to a valid value only when iAVF
1007	 * creates its first fdir rule.
1008	 */
1009	ice_vf_ctrl_invalidate_vsi(vf);
1010	ice_vf_fdir_init(vf);
1011
1012	/* Initialize mailbox info for this VF */
1013	ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1014
1015	mutex_init(&vf->cfg_lock);
1016}
1017
1018/**
1019 * ice_dis_vf_qs - Disable the VF queues
1020 * @vf: pointer to the VF structure
1021 */
1022void ice_dis_vf_qs(struct ice_vf *vf)
1023{
1024	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1025
1026	if (WARN_ON(!vsi))
1027		return;
1028
1029	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1030	ice_vsi_stop_all_rx_rings(vsi);
1031	ice_set_vf_state_qs_dis(vf);
1032}
1033
1034/**
1035 * ice_err_to_virt_err - translate errors for VF return code
1036 * @err: error return code
1037 */
1038enum virtchnl_status_code ice_err_to_virt_err(int err)
1039{
1040	switch (err) {
1041	case 0:
1042		return VIRTCHNL_STATUS_SUCCESS;
1043	case -EINVAL:
1044	case -ENODEV:
1045		return VIRTCHNL_STATUS_ERR_PARAM;
1046	case -ENOMEM:
1047		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1048	case -EALREADY:
1049	case -EBUSY:
1050	case -EIO:
1051	case -ENOSPC:
1052		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1053	default:
1054		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1055	}
1056}
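
/* Illustrative sketch, not part of the driver: virtchnl message handlers are
 * assumed to translate a kernel errno into a VIRTCHNL status code with this
 * helper before replying to the VF; e.g. ice_err_to_virt_err(-ENOMEM)
 * evaluates to VIRTCHNL_STATUS_ERR_NO_MEMORY per the mapping above:
 *
 *	enum virtchnl_status_code v_ret = ice_err_to_virt_err(err);
 */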
1057
1058/**
1059 * ice_check_vf_init - helper to check if VF init complete
1060 * @vf: the pointer to the VF to check
1061 */
1062int ice_check_vf_init(struct ice_vf *vf)
1063{
1064	struct ice_pf *pf = vf->pf;
1065
1066	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1067		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1068			vf->vf_id);
1069		return -EBUSY;
1070	}
1071	return 0;
1072}
1073
1074/**
1075 * ice_vf_get_port_info - Get the VF's port info structure
1076 * @vf: VF used to get the port info structure for
1077 */
1078struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1079{
1080	return vf->pf->hw.port_info;
1081}
1082
1083/**
1084 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1085 * @vsi: the VSI to configure
1086 * @enable: whether to enable or disable the spoof checking
1087 *
1088 * Configure a VSI to enable (or disable) spoof checking behavior.
1089 */
1090static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1091{
1092	struct ice_vsi_ctx *ctx;
1093	int err;
1094
1095	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1096	if (!ctx)
1097		return -ENOMEM;
1098
1099	ctx->info.sec_flags = vsi->info.sec_flags;
1100	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1101
1102	if (enable)
1103		ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1104	else
1105		ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1106
1107	err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1108	if (err)
1109		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1110			enable ? "ON" : "OFF", vsi->vsi_num, err);
1111	else
1112		vsi->info.sec_flags = ctx->info.sec_flags;
1113
1114	kfree(ctx);
1115
1116	return err;
1117}
1118
1119/**
1120 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1121 * @vsi: VSI to enable Tx spoof checking for
1122 */
1123static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1124{
1125	struct ice_vsi_vlan_ops *vlan_ops;
1126	int err = 0;
1127
1128	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1129
1130	/* Allow VF with VLAN 0 only to send all tagged traffic */
1131	if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1132		err = vlan_ops->ena_tx_filtering(vsi);
1133		if (err)
1134			return err;
1135	}
1136
1137	return ice_cfg_mac_antispoof(vsi, true);
1138}
1139
1140/**
1141 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1142 * @vsi: VSI to disable Tx spoof checking for
1143 */
1144static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1145{
1146	struct ice_vsi_vlan_ops *vlan_ops;
1147	int err;
1148
1149	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1150
1151	err = vlan_ops->dis_tx_filtering(vsi);
1152	if (err)
1153		return err;
1154
1155	return ice_cfg_mac_antispoof(vsi, false);
1156}
1157
1158/**
1159 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1160 * @vsi: VSI associated to the VF
1161 * @enable: whether to enable or disable the spoof checking
1162 */
1163int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1164{
1165	int err;
1166
1167	if (enable)
1168		err = ice_vsi_ena_spoofchk(vsi);
1169	else
1170		err = ice_vsi_dis_spoofchk(vsi);
1171
1172	return err;
1173}
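
/* Illustrative sketch, not part of the driver: the host-configured knob is
 * cached in vf->spoofchk and re-applied to the VF's VSI whenever the VSI is
 * (re)built, as in ice_vf_rebuild_host_cfg() above:
 *
 *	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
 *		dev_err(ice_pf_to_dev(vf->pf),
 *			"failed to apply spoofchk for VF %u\n", vf->vf_id);
 */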
1174
1175/**
1176 * ice_is_vf_trusted - check if the VF is trusted
1177 * @vf: pointer to the VF info
1178 */
1179bool ice_is_vf_trusted(struct ice_vf *vf)
1180{
1181	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1182}
1183
1184/**
1185 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1186 * @vf: the VF to check
1187 *
1188 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1189 * otherwise
1190 */
1191bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1192{
1193	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1194		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1195}
1196
1197/**
1198 * ice_is_vf_link_up - check if the VF's link is up
1199 * @vf: VF to check if link is up
1200 */
1201bool ice_is_vf_link_up(struct ice_vf *vf)
1202{
1203	struct ice_port_info *pi = ice_vf_get_port_info(vf);
1204
1205	if (ice_check_vf_init(vf))
1206		return false;
1207
1208	if (ice_vf_has_no_qs_ena(vf))
1209		return false;
1210	else if (vf->link_forced)
1211		return vf->link_up;
1212	else
1213		return pi->phy.link_info.link_info &
1214			ICE_AQ_LINK_UP;
1215}
1216
1217/**
1218 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1219 * @vf: VF that control VSI is being invalidated on
1220 */
1221void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
1222{
1223	vf->ctrl_vsi_idx = ICE_NO_VSI;
1224}
1225
1226/**
1227 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1228 * @vf: VF that control VSI is being released on
1229 */
1230void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1231{
1232	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1233	ice_vf_ctrl_invalidate_vsi(vf);
1234}
1235
1236/**
1237 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1238 * @vf: VF to setup control VSI for
1239 *
1240 * Returns a pointer to the newly allocated VSI struct on success,
1241 * or NULL on failure.
1242 */
1243struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1244{
1245	struct ice_vsi_cfg_params params = {};
1246	struct ice_pf *pf = vf->pf;
1247	struct ice_vsi *vsi;
1248
1249	params.type = ICE_VSI_CTRL;
1250	params.pi = ice_vf_get_port_info(vf);
1251	params.vf = vf;
1252	params.flags = ICE_VSI_FLAG_INIT;
1253
1254	vsi = ice_vsi_setup(pf, &params);
1255	if (!vsi) {
1256		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1257		ice_vf_ctrl_invalidate_vsi(vf);
1258	}
1259
1260	return vsi;
1261}
1262
1263/**
1264 * ice_vf_init_host_cfg - Initialize host admin configuration
1265 * @vf: VF to initialize
1266 * @vsi: the VSI created at initialization
1267 *
1268 * Initialize the VF host configuration: set up VLAN 0, add the VF VSI
1269 * broadcast filter, and configure spoof checking. This should only be
1270 * called during VF creation.
1271 */
1272int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1273{
1274	struct ice_vsi_vlan_ops *vlan_ops;
1275	struct ice_pf *pf = vf->pf;
1276	u8 broadcast[ETH_ALEN];
1277	struct device *dev;
1278	int err;
1279
1280	dev = ice_pf_to_dev(pf);
1281
1282	err = ice_vsi_add_vlan_zero(vsi);
1283	if (err) {
1284		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1285			 vf->vf_id);
1286		return err;
1287	}
1288
1289	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1290	err = vlan_ops->ena_rx_filtering(vsi);
1291	if (err) {
1292		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1293			 vf->vf_id);
1294		return err;
1295	}
1296
1297	eth_broadcast_addr(broadcast);
1298	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1299	if (err) {
1300		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1301			vf->vf_id, err);
1302		return err;
1303	}
1304
1305	vf->num_mac = 1;
1306
1307	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1308	if (err) {
1309		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1310			 vf->vf_id);
1311		return err;
1312	}
1313
1314	return 0;
1315}
1316
1317/**
1318 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
1319 * @vf: VF to remove access to VSI for
1320 */
1321void ice_vf_invalidate_vsi(struct ice_vf *vf)
1322{
1323	vf->lan_vsi_idx = ICE_NO_VSI;
1324	vf->lan_vsi_num = ICE_NO_VSI;
1325}
1326
1327/**
1328 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1329 * @vf: pointer to the VF structure
1330 *
1331 * Release the VSI associated with this VF and then invalidate the VSI
1332 * indexes.
1333 */
1334void ice_vf_vsi_release(struct ice_vf *vf)
1335{
1336	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1337
1338	if (WARN_ON(!vsi))
1339		return;
1340
1341	ice_vsi_release(vsi);
1342	ice_vf_invalidate_vsi(vf);
1343}
1344
1345/**
1346 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1347 * @pf: the PF private structure
1348 * @vsi: pointer to the VSI
1349 *
1350 * Return the first VF control VSI found, other than the VSI passed as a
1351 * parameter. This function is used to determine whether new resources
1352 * have to be allocated for the control VSI or whether they can be shared
1353 * with an existing one.
1354 *
1355 * Return a pointer to the VF control VSI found, or NULL if no other VF
1356 * control VSI exists.
1357 *
1358 */
1359struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1360{
1361	struct ice_vsi *ctrl_vsi = NULL;
1362	struct ice_vf *vf;
1363	unsigned int bkt;
1364
1365	rcu_read_lock();
1366	ice_for_each_vf_rcu(pf, bkt, vf) {
1367		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1368			ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1369			break;
1370		}
1371	}
1372
1373	rcu_read_unlock();
1374	return ctrl_vsi;
1375}
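
/* Illustrative sketch, not part of the driver: before tearing down resources
 * shared between VF control VSIs, a caller is assumed to check whether any
 * other VF still owns a control VSI; release_shared_ctrl_resources() is a
 * hypothetical helper named only for this example:
 *
 *	if (!ice_get_vf_ctrl_vsi(pf, vsi))
 *		release_shared_ctrl_resources(pf);
 */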