Loading...
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2022, Intel Corporation. */
3
4#include "ice_vf_lib_private.h"
5#include "ice.h"
6#include "ice_lib.h"
7#include "ice_fltr.h"
8#include "ice_virtchnl_allowlist.h"
9
10/* Public functions which may be accessed by all driver files */
11
12/**
13 * ice_get_vf_by_id - Get pointer to VF by ID
14 * @pf: the PF private structure
15 * @vf_id: the VF ID to locate
16 *
17 * Locate and return a pointer to the VF structure associated with a given ID.
18 * Returns NULL if the ID does not have a valid VF structure associated with
19 * it.
20 *
21 * This function takes a reference to the VF, which must be released by
22 * calling ice_put_vf() once the caller is finished accessing the VF structure
23 * returned.
24 */
25struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26{
27 struct ice_vf *vf;
28
29 rcu_read_lock();
30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 if (vf->vf_id == vf_id) {
32 struct ice_vf *found;
33
34 if (kref_get_unless_zero(&vf->refcnt))
35 found = vf;
36 else
37 found = NULL;
38
39 rcu_read_unlock();
40 return found;
41 }
42 }
43 rcu_read_unlock();
44
45 return NULL;
46}
47
48/**
49 * ice_release_vf - Release VF associated with a refcount
50 * @ref: the kref decremented to zero
51 *
52 * Callback function for kref_put to release a VF once its reference count has
53 * hit zero.
54 */
55static void ice_release_vf(struct kref *ref)
56{
57 struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
58
59 pci_dev_put(vf->vfdev);
60
61 vf->vf_ops->free(vf);
62}
63
64/**
65 * ice_put_vf - Release a reference to a VF
66 * @vf: the VF structure to decrease reference count on
67 *
68 * Decrease the reference count for a VF, and free the entry if it is no
69 * longer in use.
70 *
71 * This must be called after ice_get_vf_by_id() once the reference to the VF
72 * structure is no longer used. Otherwise, the VF structure will never be
73 * freed.
74 */
75void ice_put_vf(struct ice_vf *vf)
76{
77 kref_put(&vf->refcnt, ice_release_vf);
78}
79
80/**
81 * ice_has_vfs - Return true if the PF has any associated VFs
82 * @pf: the PF private structure
83 *
84 * Return whether or not the PF has any allocated VFs.
85 *
86 * Note that this function only guarantees that there are no VFs at the point
87 * of calling it. It does not guarantee that no more VFs will be added.
88 */
89bool ice_has_vfs(struct ice_pf *pf)
90{
91 /* A simple check that the hash table is not empty does not require
92 * the mutex or rcu_read_lock.
93 */
94 return !hash_empty(pf->vfs.table);
95}
96
97/**
98 * ice_get_num_vfs - Get number of allocated VFs
99 * @pf: the PF private structure
100 *
101 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
102 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
103 * the output of this function.
104 */
105u16 ice_get_num_vfs(struct ice_pf *pf)
106{
107 struct ice_vf *vf;
108 unsigned int bkt;
109 u16 num_vfs = 0;
110
111 rcu_read_lock();
112 ice_for_each_vf_rcu(pf, bkt, vf)
113 num_vfs++;
114 rcu_read_unlock();
115
116 return num_vfs;
117}
118
119/**
120 * ice_get_vf_vsi - get VF's VSI based on the stored index
121 * @vf: VF used to get VSI
122 */
123struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
124{
125 if (vf->lan_vsi_idx == ICE_NO_VSI)
126 return NULL;
127
128 return vf->pf->vsi[vf->lan_vsi_idx];
129}
130
131/**
132 * ice_is_vf_disabled
133 * @vf: pointer to the VF info
134 *
135 * If the PF has been disabled, there is no need resetting VF until PF is
136 * active again. Similarly, if the VF has been disabled, this means something
137 * else is resetting the VF, so we shouldn't continue.
138 *
139 * Returns true if the caller should consider the VF as disabled whether
140 * because that single VF is explicitly disabled or because the PF is
141 * currently disabled.
142 */
143bool ice_is_vf_disabled(struct ice_vf *vf)
144{
145 struct ice_pf *pf = vf->pf;
146
147 return (test_bit(ICE_VF_DIS, pf->state) ||
148 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
149}
150
151/**
152 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
153 * @vf: The VF being resseting
154 *
155 * The max poll time is about ~800ms, which is about the maximum time it takes
156 * for a VF to be reset and/or a VF driver to be removed.
157 */
158static void ice_wait_on_vf_reset(struct ice_vf *vf)
159{
160 int i;
161
162 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
163 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
164 break;
165 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
166 }
167}
168
169/**
170 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
171 * @vf: VF to check if it's ready to be configured/queried
172 *
173 * The purpose of this function is to make sure the VF is not in reset, not
174 * disabled, and initialized so it can be configured and/or queried by a host
175 * administrator.
176 */
177int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
178{
179 ice_wait_on_vf_reset(vf);
180
181 if (ice_is_vf_disabled(vf))
182 return -EINVAL;
183
184 if (ice_check_vf_init(vf))
185 return -EBUSY;
186
187 return 0;
188}
189
190/**
191 * ice_trigger_vf_reset - Reset a VF on HW
192 * @vf: pointer to the VF structure
193 * @is_vflr: true if VFLR was issued, false if not
194 * @is_pfr: true if the reset was triggered due to a previous PFR
195 *
196 * Trigger hardware to start a reset for a particular VF. Expects the caller
197 * to wait the proper amount of time to allow hardware to reset the VF before
198 * it cleans up and restores VF functionality.
199 */
200static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
201{
202 /* Inform VF that it is no longer active, as a warning */
203 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
204
205 /* Disable VF's configuration API during reset. The flag is re-enabled
206 * when it's safe again to access VF's VSI.
207 */
208 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
209
210 /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
211 * needs to clear them in the case of VFR/VFLR. If this is done for
212 * PFR, it can mess up VF resets because the VF driver may already
213 * have started cleanup by the time we get here.
214 */
215 if (!is_pfr)
216 vf->vf_ops->clear_mbx_register(vf);
217
218 vf->vf_ops->trigger_reset_register(vf, is_vflr);
219}
220
/**
 * ice_vf_clear_counters - reset the per-VF software counters
 * @vf: VF whose counters should be cleared
 *
 * Zero the software-tracked VLAN and MAC filter counts as well as the MDD
 * (Malicious Driver Detection) event statistics. Called while a VF is being
 * rebuilt during reset, when all of its filters are removed.
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* VSI may be absent if the VF is partially torn down */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
232
233/**
234 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
235 * @vf: VF to perform pre VSI rebuild tasks
236 *
237 * These tasks are items that don't need to be amortized since they are most
238 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
239 */
240static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
241{
242 /* Close any IRQ mapping now */
243 if (vf->vf_ops->irq_close)
244 vf->vf_ops->irq_close(vf);
245
246 ice_vf_clear_counters(vf);
247 vf->vf_ops->clear_reset_trigger(vf);
248}
249
250/**
251 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
252 * @vf: VF to reconfigure the VSI for
253 *
254 * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
255 * configuration change, etc).
256 *
257 * It brings the VSI down and then reconfigures it with the hardware.
258 */
259int ice_vf_reconfig_vsi(struct ice_vf *vf)
260{
261 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
262 struct ice_vsi_cfg_params params = {};
263 struct ice_pf *pf = vf->pf;
264 int err;
265
266 if (WARN_ON(!vsi))
267 return -EINVAL;
268
269 params = ice_vsi_to_params(vsi);
270 params.flags = ICE_VSI_FLAG_NO_INIT;
271
272 ice_vsi_decfg(vsi);
273 ice_fltr_remove_all(vsi);
274
275 err = ice_vsi_cfg(vsi, ¶ms);
276 if (err) {
277 dev_err(ice_pf_to_dev(pf),
278 "Failed to reconfigure the VF%u's VSI, error %d\n",
279 vf->vf_id, err);
280 return err;
281 }
282
283 return 0;
284}
285
286/**
287 * ice_vf_rebuild_vsi - rebuild the VF's VSI
288 * @vf: VF to rebuild the VSI for
289 *
290 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
291 * host, PFR, CORER, etc.).
292 *
293 * It reprograms the VSI configuration back into hardware.
294 */
295static int ice_vf_rebuild_vsi(struct ice_vf *vf)
296{
297 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
298 struct ice_pf *pf = vf->pf;
299
300 if (WARN_ON(!vsi))
301 return -EINVAL;
302
303 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
304 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
305 vf->vf_id);
306 return -EIO;
307 }
308 /* vsi->idx will remain the same in this case so don't update
309 * vf->lan_vsi_idx
310 */
311 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
312
313 return 0;
314}
315
316/**
317 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
318 * @vf: VF to add MAC filters for
319 * @vsi: Pointer to VSI
320 *
321 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
322 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
323 */
324static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
325{
326 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
327 struct device *dev = ice_pf_to_dev(vf->pf);
328 int err;
329
330 if (ice_vf_is_port_vlan_ena(vf)) {
331 err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
332 if (err) {
333 dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
334 vf->vf_id, err);
335 return err;
336 }
337
338 err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
339 } else {
340 err = ice_vsi_add_vlan_zero(vsi);
341 }
342
343 if (err) {
344 dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
345 ice_vf_is_port_vlan_ena(vf) ?
346 ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
347 return err;
348 }
349
350 err = vlan_ops->ena_rx_filtering(vsi);
351 if (err)
352 dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
353 vf->vf_id, vsi->idx, err);
354
355 return 0;
356}
357
358/**
359 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
360 * @vf: VF to re-apply the configuration for
361 *
362 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
363 * needs to re-apply the host configured Tx rate limiting configuration.
364 */
365static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
366{
367 struct device *dev = ice_pf_to_dev(vf->pf);
368 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
369 int err;
370
371 if (WARN_ON(!vsi))
372 return -EINVAL;
373
374 if (vf->min_tx_rate) {
375 err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
376 if (err) {
377 dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
378 vf->min_tx_rate, vf->vf_id, err);
379 return err;
380 }
381 }
382
383 if (vf->max_tx_rate) {
384 err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
385 if (err) {
386 dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
387 vf->max_tx_rate, vf->vf_id, err);
388 return err;
389 }
390 }
391
392 return 0;
393}
394
395/**
396 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
397 * @vf: VF to configure trust setting for
398 */
399static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
400{
401 assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
402}
403
404/**
405 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
406 * @vf: VF to add MAC filters for
407 *
408 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
409 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
410 */
411static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
412{
413 struct device *dev = ice_pf_to_dev(vf->pf);
414 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
415 u8 broadcast[ETH_ALEN];
416 int status;
417
418 if (WARN_ON(!vsi))
419 return -EINVAL;
420
421 if (ice_is_eswitch_mode_switchdev(vf->pf))
422 return 0;
423
424 eth_broadcast_addr(broadcast);
425 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
426 if (status) {
427 dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
428 vf->vf_id, status);
429 return status;
430 }
431
432 vf->num_mac++;
433
434 if (is_valid_ether_addr(vf->hw_lan_addr)) {
435 status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
436 ICE_FWD_TO_VSI);
437 if (status) {
438 dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
439 &vf->hw_lan_addr[0], vf->vf_id,
440 status);
441 return status;
442 }
443 vf->num_mac++;
444
445 ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
446 }
447
448 return 0;
449}
450
451/**
452 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
453 * @vsi: Pointer to VSI
454 *
455 * This function moves VSI into corresponding scheduler aggregator node
456 * based on cached value of "aggregator node info" per VSI
457 */
458static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
459{
460 struct ice_pf *pf = vsi->back;
461 struct device *dev;
462 int status;
463
464 if (!vsi->agg_node)
465 return;
466
467 dev = ice_pf_to_dev(pf);
468 if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
469 dev_dbg(dev,
470 "agg_id %u already has reached max_num_vsis %u\n",
471 vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
472 return;
473 }
474
475 status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
476 vsi->idx, vsi->tc_cfg.ena_tc);
477 if (status)
478 dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
479 vsi->idx, vsi->agg_node->agg_id);
480 else
481 vsi->agg_node->num_vsis++;
482}
483
484/**
485 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
486 * @vf: VF to rebuild host configuration on
487 */
488static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
489{
490 struct device *dev = ice_pf_to_dev(vf->pf);
491 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
492
493 if (WARN_ON(!vsi))
494 return;
495
496 ice_vf_set_host_trust_cfg(vf);
497
498 if (ice_vf_rebuild_host_mac_cfg(vf))
499 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
500 vf->vf_id);
501
502 if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
503 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
504 vf->vf_id);
505
506 if (ice_vf_rebuild_host_tx_rate_cfg(vf))
507 dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
508 vf->vf_id);
509
510 if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
511 dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
512 vf->vf_id);
513
514 /* rebuild aggregator node config for main VF VSI */
515 ice_vf_rebuild_aggregator_node_cfg(vsi);
516}
517
518/**
519 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
520 * @vf: pointer to the VF structure
521 */
522static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
523{
524 /* Clear Rx/Tx enabled queues flag */
525 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
526 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
527 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
528}
529
530/**
531 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
532 * @vf: VF to set in initialized state
533 *
534 * After this function the VF will be ready to receive/handle the
535 * VIRTCHNL_OP_GET_VF_RESOURCES message
536 */
537static void ice_vf_set_initialized(struct ice_vf *vf)
538{
539 ice_set_vf_state_qs_dis(vf);
540 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
541 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
542 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
543 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
544 memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
545}
546
547/**
548 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
549 * @vf: the VF being reset
550 *
551 * Perform reset tasks which must occur after the VSI has been re-created or
552 * rebuilt during a VF reset.
553 */
554static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
555{
556 ice_vf_rebuild_host_cfg(vf);
557 ice_vf_set_initialized(vf);
558
559 vf->vf_ops->post_vsi_rebuild(vf);
560}
561
562/**
563 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
564 * are in unicast promiscuous mode
565 * @pf: PF structure for accessing VF(s)
566 *
567 * Return false if no VF(s) are in unicast promiscuous mode,
568 * else return true
569 */
570bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
571{
572 bool is_vf_promisc = false;
573 struct ice_vf *vf;
574 unsigned int bkt;
575
576 rcu_read_lock();
577 ice_for_each_vf_rcu(pf, bkt, vf) {
578 /* found a VF that has promiscuous mode configured */
579 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
580 is_vf_promisc = true;
581 break;
582 }
583 }
584 rcu_read_unlock();
585
586 return is_vf_promisc;
587}
588
589/**
590 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
591 * @vf: the VF pointer
592 * @vsi: the VSI to configure
593 * @ucast_m: promiscuous mask to apply to unicast
594 * @mcast_m: promiscuous mask to apply to multicast
595 *
596 * Decide which mask should be used for unicast and multicast filter,
597 * based on presence of VLANs
598 */
599void
600ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
601 u8 *ucast_m, u8 *mcast_m)
602{
603 if (ice_vf_is_port_vlan_ena(vf) ||
604 ice_vsi_has_non_zero_vlans(vsi)) {
605 *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
606 *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
607 } else {
608 *mcast_m = ICE_MCAST_PROMISC_BITS;
609 *ucast_m = ICE_UCAST_PROMISC_BITS;
610 }
611}
612
613/**
614 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
615 * @vf: the VF pointer
616 * @vsi: the VSI to configure
617 *
618 * Clear all promiscuous/allmulticast filters for a VF
619 */
620static int
621ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
622{
623 struct ice_pf *pf = vf->pf;
624 u8 ucast_m, mcast_m;
625 int ret = 0;
626
627 ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
628 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
629 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
630 if (ice_is_dflt_vsi_in_use(vsi->port_info))
631 ret = ice_clear_dflt_vsi(vsi);
632 } else {
633 ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
634 }
635
636 if (ret) {
637 dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
638 } else {
639 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
640 dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
641 }
642 }
643
644 if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
645 ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
646 if (ret) {
647 dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
648 } else {
649 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
650 dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
651 }
652 }
653 return ret;
654}
655
656/**
657 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
658 * @vf: the VF to configure
659 * @vsi: the VF's VSI
660 * @promisc_m: the promiscuous mode to enable
661 */
662int
663ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
664{
665 struct ice_hw *hw = &vsi->back->hw;
666 int status;
667
668 if (ice_vf_is_port_vlan_ena(vf))
669 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
670 ice_vf_get_port_vlan_id(vf));
671 else if (ice_vsi_has_non_zero_vlans(vsi))
672 status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
673 else
674 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
675
676 if (status && status != -EEXIST) {
677 dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
678 vf->vf_id, status);
679 return status;
680 }
681
682 return 0;
683}
684
685/**
686 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
687 * @vf: the VF to configure
688 * @vsi: the VF's VSI
689 * @promisc_m: the promiscuous mode to disable
690 */
691int
692ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
693{
694 struct ice_hw *hw = &vsi->back->hw;
695 int status;
696
697 if (ice_vf_is_port_vlan_ena(vf))
698 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
699 ice_vf_get_port_vlan_id(vf));
700 else if (ice_vsi_has_non_zero_vlans(vsi))
701 status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
702 else
703 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
704
705 if (status && status != -ENOENT) {
706 dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
707 vf->vf_id, status);
708 return status;
709 }
710
711 return 0;
712}
713
714/**
715 * ice_reset_all_vfs - reset all allocated VFs in one go
716 * @pf: pointer to the PF structure
717 *
718 * Reset all VFs at once, in response to a PF or other device reset.
719 *
720 * First, tell the hardware to reset each VF, then do all the waiting in one
721 * chunk, and finally finish restoring each VF after the wait. This is useful
722 * during PF routines which need to reset all VFs, as otherwise it must perform
723 * these resets in a serialized fashion.
724 */
725void ice_reset_all_vfs(struct ice_pf *pf)
726{
727 struct device *dev = ice_pf_to_dev(pf);
728 struct ice_hw *hw = &pf->hw;
729 struct ice_vf *vf;
730 unsigned int bkt;
731
732 /* If we don't have any VFs, then there is nothing to reset */
733 if (!ice_has_vfs(pf))
734 return;
735
736 mutex_lock(&pf->vfs.table_lock);
737
738 /* clear all malicious info if the VFs are getting reset */
739 ice_for_each_vf(pf, bkt, vf)
740 ice_mbx_clear_malvf(&vf->mbx_info);
741
742 /* If VFs have been disabled, there is no need to reset */
743 if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
744 mutex_unlock(&pf->vfs.table_lock);
745 return;
746 }
747
748 /* Begin reset on all VFs at once */
749 ice_for_each_vf(pf, bkt, vf)
750 ice_trigger_vf_reset(vf, true, true);
751
752 /* HW requires some time to make sure it can flush the FIFO for a VF
753 * when it resets it. Now that we've triggered all of the VFs, iterate
754 * the table again and wait for each VF to complete.
755 */
756 ice_for_each_vf(pf, bkt, vf) {
757 if (!vf->vf_ops->poll_reset_status(vf)) {
758 /* Display a warning if at least one VF didn't manage
759 * to reset in time, but continue on with the
760 * operation.
761 */
762 dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
763 break;
764 }
765 }
766
767 /* free VF resources to begin resetting the VSI state */
768 ice_for_each_vf(pf, bkt, vf) {
769 mutex_lock(&vf->cfg_lock);
770
771 ice_eswitch_detach(pf, vf);
772 vf->driver_caps = 0;
773 ice_vc_set_default_allowlist(vf);
774
775 ice_vf_fdir_exit(vf);
776 ice_vf_fdir_init(vf);
777 /* clean VF control VSI when resetting VFs since it should be
778 * setup only when VF creates its first FDIR rule.
779 */
780 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
781 ice_vf_ctrl_invalidate_vsi(vf);
782
783 ice_vf_pre_vsi_rebuild(vf);
784 ice_vf_rebuild_vsi(vf);
785 ice_vf_post_vsi_rebuild(vf);
786
787 ice_eswitch_attach(pf, vf);
788
789 mutex_unlock(&vf->cfg_lock);
790 }
791
792 ice_flush(hw);
793 clear_bit(ICE_VF_DIS, pf->state);
794
795 mutex_unlock(&pf->vfs.table_lock);
796}
797
798/**
799 * ice_notify_vf_reset - Notify VF of a reset event
800 * @vf: pointer to the VF structure
801 */
802static void ice_notify_vf_reset(struct ice_vf *vf)
803{
804 struct ice_hw *hw = &vf->pf->hw;
805 struct virtchnl_pf_event pfe;
806
807 /* Bail out if VF is in disabled state, neither initialized, nor active
808 * state - otherwise proceed with notifications
809 */
810 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
811 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
812 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
813 return;
814
815 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
816 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
817 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
818 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
819 NULL);
820}
821
822/**
823 * ice_reset_vf - Reset a particular VF
824 * @vf: pointer to the VF structure
825 * @flags: flags controlling behavior of the reset
826 *
827 * Flags:
828 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
829 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
830 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
831 *
832 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
833 * the VF resets successfully. Returns an error code if the VF fails to
834 * rebuild.
835 */
836int ice_reset_vf(struct ice_vf *vf, u32 flags)
837{
838 struct ice_pf *pf = vf->pf;
839 struct ice_lag *lag;
840 struct ice_vsi *vsi;
841 u8 act_prt, pri_prt;
842 struct device *dev;
843 int err = 0;
844 bool rsd;
845
846 dev = ice_pf_to_dev(pf);
847 act_prt = ICE_LAG_INVALID_PORT;
848 pri_prt = pf->hw.port_info->lport;
849
850 if (flags & ICE_VF_RESET_NOTIFY)
851 ice_notify_vf_reset(vf);
852
853 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
854 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
855 vf->vf_id);
856 return 0;
857 }
858
859 if (flags & ICE_VF_RESET_LOCK)
860 mutex_lock(&vf->cfg_lock);
861 else
862 lockdep_assert_held(&vf->cfg_lock);
863
864 lag = pf->lag;
865 mutex_lock(&pf->lag_mutex);
866 if (lag && lag->bonded && lag->primary) {
867 act_prt = lag->active_port;
868 if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
869 lag->upper_netdev)
870 ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
871 else
872 act_prt = ICE_LAG_INVALID_PORT;
873 }
874
875 if (ice_is_vf_disabled(vf)) {
876 vsi = ice_get_vf_vsi(vf);
877 if (!vsi) {
878 dev_dbg(dev, "VF is already removed\n");
879 err = -EINVAL;
880 goto out_unlock;
881 }
882 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
883
884 if (ice_vsi_is_rx_queue_active(vsi))
885 ice_vsi_stop_all_rx_rings(vsi);
886
887 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
888 vf->vf_id);
889 goto out_unlock;
890 }
891
892 /* Set VF disable bit state here, before triggering reset */
893 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
894 ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
895
896 vsi = ice_get_vf_vsi(vf);
897 if (WARN_ON(!vsi)) {
898 err = -EIO;
899 goto out_unlock;
900 }
901
902 ice_dis_vf_qs(vf);
903
904 /* Call Disable LAN Tx queue AQ whether or not queues are
905 * enabled. This is needed for successful completion of VFR.
906 */
907 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
908 NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
909
910 /* poll VPGEN_VFRSTAT reg to make sure
911 * that reset is complete
912 */
913 rsd = vf->vf_ops->poll_reset_status(vf);
914
915 /* Display a warning if VF didn't manage to reset in time, but need to
916 * continue on with the operation.
917 */
918 if (!rsd)
919 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
920
921 vf->driver_caps = 0;
922 ice_vc_set_default_allowlist(vf);
923
924 /* disable promiscuous modes in case they were enabled
925 * ignore any error if disabling process failed
926 */
927 ice_vf_clear_all_promisc_modes(vf, vsi);
928
929 ice_vf_fdir_exit(vf);
930 ice_vf_fdir_init(vf);
931 /* clean VF control VSI when resetting VF since it should be setup
932 * only when VF creates its first FDIR rule.
933 */
934 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
935 ice_vf_ctrl_vsi_release(vf);
936
937 ice_vf_pre_vsi_rebuild(vf);
938
939 if (ice_vf_reconfig_vsi(vf)) {
940 dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
941 vf->vf_id);
942 err = -EFAULT;
943 goto out_unlock;
944 }
945
946 ice_vf_post_vsi_rebuild(vf);
947 vsi = ice_get_vf_vsi(vf);
948 if (WARN_ON(!vsi)) {
949 err = -EINVAL;
950 goto out_unlock;
951 }
952
953 ice_eswitch_update_repr(vf->repr_id, vsi);
954
955 /* if the VF has been reset allow it to come up again */
956 ice_mbx_clear_malvf(&vf->mbx_info);
957
958out_unlock:
959 if (lag && lag->bonded && lag->primary &&
960 act_prt != ICE_LAG_INVALID_PORT)
961 ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
962 mutex_unlock(&pf->lag_mutex);
963
964 if (flags & ICE_VF_RESET_LOCK)
965 mutex_unlock(&vf->cfg_lock);
966
967 return err;
968}
969
970/**
971 * ice_set_vf_state_dis - Set VF state to disabled
972 * @vf: pointer to the VF structure
973 */
974void ice_set_vf_state_dis(struct ice_vf *vf)
975{
976 ice_set_vf_state_qs_dis(vf);
977 vf->vf_ops->clear_reset_state(vf);
978}
979
980/* Private functions only accessed from other virtualization files */
981
982/**
983 * ice_initialize_vf_entry - Initialize a VF entry
984 * @vf: pointer to the VF structure
985 */
986void ice_initialize_vf_entry(struct ice_vf *vf)
987{
988 struct ice_pf *pf = vf->pf;
989 struct ice_vfs *vfs;
990
991 vfs = &pf->vfs;
992
993 /* assign default capabilities */
994 vf->spoofchk = true;
995 vf->num_vf_qs = vfs->num_qps_per;
996 ice_vc_set_default_allowlist(vf);
997 ice_virtchnl_set_dflt_ops(vf);
998
999 /* ctrl_vsi_idx will be set to a valid value only when iAVF
1000 * creates its first fdir rule.
1001 */
1002 ice_vf_ctrl_invalidate_vsi(vf);
1003 ice_vf_fdir_init(vf);
1004
1005 /* Initialize mailbox info for this VF */
1006 ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1007
1008 mutex_init(&vf->cfg_lock);
1009}
1010
1011/**
1012 * ice_dis_vf_qs - Disable the VF queues
1013 * @vf: pointer to the VF structure
1014 */
1015void ice_dis_vf_qs(struct ice_vf *vf)
1016{
1017 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1018
1019 if (WARN_ON(!vsi))
1020 return;
1021
1022 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1023 ice_vsi_stop_all_rx_rings(vsi);
1024 ice_set_vf_state_qs_dis(vf);
1025}
1026
1027/**
1028 * ice_err_to_virt_err - translate errors for VF return code
1029 * @err: error return code
1030 */
1031enum virtchnl_status_code ice_err_to_virt_err(int err)
1032{
1033 switch (err) {
1034 case 0:
1035 return VIRTCHNL_STATUS_SUCCESS;
1036 case -EINVAL:
1037 case -ENODEV:
1038 return VIRTCHNL_STATUS_ERR_PARAM;
1039 case -ENOMEM:
1040 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1041 case -EALREADY:
1042 case -EBUSY:
1043 case -EIO:
1044 case -ENOSPC:
1045 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1046 default:
1047 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1048 }
1049}
1050
1051/**
1052 * ice_check_vf_init - helper to check if VF init complete
1053 * @vf: the pointer to the VF to check
1054 */
1055int ice_check_vf_init(struct ice_vf *vf)
1056{
1057 struct ice_pf *pf = vf->pf;
1058
1059 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1060 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1061 vf->vf_id);
1062 return -EBUSY;
1063 }
1064 return 0;
1065}
1066
1067/**
1068 * ice_vf_get_port_info - Get the VF's port info structure
1069 * @vf: VF used to get the port info structure for
1070 */
1071struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1072{
1073 return vf->pf->hw.port_info;
1074}
1075
1076/**
1077 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1078 * @vsi: the VSI to configure
1079 * @enable: whether to enable or disable the spoof checking
1080 *
1081 * Configure a VSI to enable (or disable) spoof checking behavior.
1082 */
1083static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1084{
1085 struct ice_vsi_ctx *ctx;
1086 int err;
1087
1088 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1089 if (!ctx)
1090 return -ENOMEM;
1091
1092 ctx->info.sec_flags = vsi->info.sec_flags;
1093 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1094
1095 if (enable)
1096 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1097 else
1098 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1099
1100 err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1101 if (err)
1102 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1103 enable ? "ON" : "OFF", vsi->vsi_num, err);
1104 else
1105 vsi->info.sec_flags = ctx->info.sec_flags;
1106
1107 kfree(ctx);
1108
1109 return err;
1110}
1111
1112/**
1113 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1114 * @vsi: VSI to enable Tx spoof checking for
1115 */
1116static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1117{
1118 struct ice_vsi_vlan_ops *vlan_ops;
1119 int err = 0;
1120
1121 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1122
1123 /* Allow VF with VLAN 0 only to send all tagged traffic */
1124 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1125 err = vlan_ops->ena_tx_filtering(vsi);
1126 if (err)
1127 return err;
1128 }
1129
1130 return ice_cfg_mac_antispoof(vsi, true);
1131}
1132
1133/**
1134 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1135 * @vsi: VSI to disable Tx spoof checking for
1136 */
1137static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1138{
1139 struct ice_vsi_vlan_ops *vlan_ops;
1140 int err;
1141
1142 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1143
1144 err = vlan_ops->dis_tx_filtering(vsi);
1145 if (err)
1146 return err;
1147
1148 return ice_cfg_mac_antispoof(vsi, false);
1149}
1150
1151/**
1152 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1153 * @vsi: VSI associated to the VF
1154 * @enable: whether to enable or disable the spoof checking
1155 */
1156int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1157{
1158 int err;
1159
1160 if (enable)
1161 err = ice_vsi_ena_spoofchk(vsi);
1162 else
1163 err = ice_vsi_dis_spoofchk(vsi);
1164
1165 return err;
1166}
1167
1168/**
1169 * ice_is_vf_trusted
1170 * @vf: pointer to the VF info
1171 */
bool ice_is_vf_trusted(struct ice_vf *vf)
{
	/* trust state is tracked as the privilege capability bit */
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
1176
1177/**
1178 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1179 * @vf: the VF to check
1180 *
1181 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1182 * otherwise
1183 */
1184bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1185{
1186 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1187 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1188}
1189
1190/**
1191 * ice_is_vf_link_up - check if the VF's link is up
1192 * @vf: VF to check if link is up
1193 */
1194bool ice_is_vf_link_up(struct ice_vf *vf)
1195{
1196 struct ice_port_info *pi = ice_vf_get_port_info(vf);
1197
1198 if (ice_check_vf_init(vf))
1199 return false;
1200
1201 if (ice_vf_has_no_qs_ena(vf))
1202 return false;
1203 else if (vf->link_forced)
1204 return vf->link_up;
1205 else
1206 return pi->phy.link_info.link_info &
1207 ICE_AQ_LINK_UP;
1208}
1209
1210/**
1211 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1212 * @vf: VF that control VSI is being invalidated on
1213 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	/* mark the control VSI slot unused so no one indexes pf->vsi[] */
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}
1218
1219/**
1220 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1221 * @vf: VF that control VSI is being released on
1222 */
1223void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1224{
1225 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1226 ice_vf_ctrl_invalidate_vsi(vf);
1227}
1228
1229/**
1230 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1231 * @vf: VF to setup control VSI for
1232 *
1233 * Returns pointer to the successfully allocated VSI struct on success,
1234 * otherwise returns NULL on failure.
1235 */
1236struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1237{
1238 struct ice_vsi_cfg_params params = {};
1239 struct ice_pf *pf = vf->pf;
1240 struct ice_vsi *vsi;
1241
1242 params.type = ICE_VSI_CTRL;
1243 params.pi = ice_vf_get_port_info(vf);
1244 params.vf = vf;
1245 params.flags = ICE_VSI_FLAG_INIT;
1246
1247 vsi = ice_vsi_setup(pf, ¶ms);
1248 if (!vsi) {
1249 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1250 ice_vf_ctrl_invalidate_vsi(vf);
1251 }
1252
1253 return vsi;
1254}
1255
1256/**
1257 * ice_vf_init_host_cfg - Initialize host admin configuration
1258 * @vf: VF to initialize
1259 * @vsi: the VSI created at initialization
1260 *
1261 * Initialize the VF host configuration. Called during VF creation to setup
1262 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1263 * should only be called during VF creation.
1264 */
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops;
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* untagged traffic relies on a VLAN 0 filter being present */
	err = ice_vsi_add_vlan_zero(vsi);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		return err;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err) {
		dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
			 vf->vf_id);
		return err;
	}

	eth_broadcast_addr(broadcast);
	err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
			vf->vf_id, err);
		return err;
	}

	/* the broadcast filter above is the VF's first MAC filter */
	vf->num_mac = 1;

	err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
	if (err) {
		dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
			 vf->vf_id);
		return err;
	}

	return 0;
}
1309
1310/**
1311 * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
1312 * @vf: VF to remove access to VSI for
1313 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	/* mark the LAN VSI slot unused so no one indexes pf->vsi[] */
	vf->lan_vsi_idx = ICE_NO_VSI;
}
1318
1319/**
1320 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1321 * @vf: pointer to the VF structure
1322 *
1323 * Release the VF associated with this VSI and then invalidate the VSI
1324 * indexes.
1325 */
void ice_vf_vsi_release(struct ice_vf *vf)
{
	struct ice_vsi *lan_vsi = ice_get_vf_vsi(vf);

	/* a missing VSI here indicates a driver bug, not a normal state */
	if (WARN_ON(!lan_vsi))
		return;

	ice_vsi_release(lan_vsi);
	ice_vf_invalidate_vsi(vf);
}
1336
1337/**
1338 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1339 * @pf: the PF private structure
1340 * @vsi: pointer to the VSI
1341 *
1342 * Return first found VF control VSI other than the vsi
1343 * passed by parameter. This function is used to determine
1344 * whether new resources have to be allocated for control VSI
1345 * or they can be shared with existing one.
1346 *
 * Return a pointer to the first VF control VSI found, other than the VSI
 * passed in; return NULL otherwise.
1349 *
1350 */
1351struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1352{
1353 struct ice_vsi *ctrl_vsi = NULL;
1354 struct ice_vf *vf;
1355 unsigned int bkt;
1356
1357 rcu_read_lock();
1358 ice_for_each_vf_rcu(pf, bkt, vf) {
1359 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1360 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1361 break;
1362 }
1363 }
1364
1365 rcu_read_unlock();
1366 return ctrl_vsi;
1367}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2022, Intel Corporation. */
3
4#include "ice_vf_lib_private.h"
5#include "ice.h"
6#include "ice_lib.h"
7#include "ice_fltr.h"
8#include "ice_virtchnl_allowlist.h"
9
10/* Public functions which may be accessed by all driver files */
11
12/**
13 * ice_get_vf_by_id - Get pointer to VF by ID
14 * @pf: the PF private structure
15 * @vf_id: the VF ID to locate
16 *
17 * Locate and return a pointer to the VF structure associated with a given ID.
18 * Returns NULL if the ID does not have a valid VF structure associated with
19 * it.
20 *
21 * This function takes a reference to the VF, which must be released by
22 * calling ice_put_vf() once the caller is finished accessing the VF structure
23 * returned.
24 */
25struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
26{
27 struct ice_vf *vf;
28
29 rcu_read_lock();
30 hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
31 if (vf->vf_id == vf_id) {
32 struct ice_vf *found;
33
34 if (kref_get_unless_zero(&vf->refcnt))
35 found = vf;
36 else
37 found = NULL;
38
39 rcu_read_unlock();
40 return found;
41 }
42 }
43 rcu_read_unlock();
44
45 return NULL;
46}
47
48/**
49 * ice_release_vf - Release VF associated with a refcount
50 * @ref: the kref decremented to zero
51 *
52 * Callback function for kref_put to release a VF once its reference count has
53 * hit zero.
54 */
55static void ice_release_vf(struct kref *ref)
56{
57 struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
58
59 pci_dev_put(vf->vfdev);
60
61 vf->vf_ops->free(vf);
62}
63
64/**
65 * ice_put_vf - Release a reference to a VF
66 * @vf: the VF structure to decrease reference count on
67 *
68 * Decrease the reference count for a VF, and free the entry if it is no
69 * longer in use.
70 *
71 * This must be called after ice_get_vf_by_id() once the reference to the VF
72 * structure is no longer used. Otherwise, the VF structure will never be
73 * freed.
74 */
void ice_put_vf(struct ice_vf *vf)
{
	/* ice_release_vf() frees the VF when the last reference drops */
	kref_put(&vf->refcnt, ice_release_vf);
}
79
80/**
81 * ice_has_vfs - Return true if the PF has any associated VFs
82 * @pf: the PF private structure
83 *
84 * Return whether or not the PF has any allocated VFs.
85 *
86 * Note that this function only guarantees that there are no VFs at the point
87 * of calling it. It does not guarantee that no more VFs will be added.
88 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock. This is only a point-in-time snapshot;
	 * VFs may be added or removed immediately afterwards.
	 */
	return !hash_empty(pf->vfs.table);
}
96
97/**
98 * ice_get_num_vfs - Get number of allocated VFs
99 * @pf: the PF private structure
100 *
101 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
102 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
103 * the output of this function.
104 */
105u16 ice_get_num_vfs(struct ice_pf *pf)
106{
107 struct ice_vf *vf;
108 unsigned int bkt;
109 u16 num_vfs = 0;
110
111 rcu_read_lock();
112 ice_for_each_vf_rcu(pf, bkt, vf)
113 num_vfs++;
114 rcu_read_unlock();
115
116 return num_vfs;
117}
118
119/**
120 * ice_get_vf_vsi - get VF's VSI based on the stored index
121 * @vf: VF used to get VSI
122 */
123struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
124{
125 if (vf->lan_vsi_idx == ICE_NO_VSI)
126 return NULL;
127
128 return vf->pf->vsi[vf->lan_vsi_idx];
129}
130
131/**
132 * ice_is_vf_disabled
133 * @vf: pointer to the VF info
134 *
 * If the PF has been disabled, there is no need to reset the VF until the PF is
136 * active again. Similarly, if the VF has been disabled, this means something
137 * else is resetting the VF, so we shouldn't continue.
138 *
139 * Returns true if the caller should consider the VF as disabled whether
140 * because that single VF is explicitly disabled or because the PF is
141 * currently disabled.
142 */
143bool ice_is_vf_disabled(struct ice_vf *vf)
144{
145 struct ice_pf *pf = vf->pf;
146
147 return (test_bit(ICE_VF_DIS, pf->state) ||
148 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
149}
150
151/**
152 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: the VF being reset
154 *
155 * The max poll time is about ~800ms, which is about the maximum time it takes
156 * for a VF to be reset and/or a VF driver to be removed.
157 */
158static void ice_wait_on_vf_reset(struct ice_vf *vf)
159{
160 int i;
161
162 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
163 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
164 break;
165 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
166 }
167}
168
169/**
170 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
171 * @vf: VF to check if it's ready to be configured/queried
172 *
173 * The purpose of this function is to make sure the VF is not in reset, not
174 * disabled, and initialized so it can be configured and/or queried by a host
175 * administrator.
176 */
177int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
178{
179 ice_wait_on_vf_reset(vf);
180
181 if (ice_is_vf_disabled(vf))
182 return -EINVAL;
183
184 if (ice_check_vf_init(vf))
185 return -EBUSY;
186
187 return 0;
188}
189
190/**
191 * ice_trigger_vf_reset - Reset a VF on HW
192 * @vf: pointer to the VF structure
193 * @is_vflr: true if VFLR was issued, false if not
194 * @is_pfr: true if the reset was triggered due to a previous PFR
195 *
196 * Trigger hardware to start a reset for a particular VF. Expects the caller
197 * to wait the proper amount of time to allow hardware to reset the VF before
198 * it cleans up and restores VF functionality.
199 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* kick off the reset in HW; the caller is responsible for waiting
	 * on completion (e.g. via vf_ops->poll_reset_status)
	 */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
220
/**
 * ice_vf_clear_counters - reset per-VF bookkeeping counters
 * @vf: VF whose counters are being cleared
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* the VSI may already be gone during teardown */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
232
233/**
234 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
235 * @vf: VF to perform pre VSI rebuild tasks
236 *
237 * These tasks are items that don't need to be amortized since they are most
238 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
239 */
240static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
241{
242 /* Close any IRQ mapping now */
243 if (vf->vf_ops->irq_close)
244 vf->vf_ops->irq_close(vf);
245
246 ice_vf_clear_counters(vf);
247 vf->vf_ops->clear_reset_trigger(vf);
248}
249
250/**
251 * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
252 * @vf: VF to reconfigure the VSI for
253 *
 * This is called when a single VF is being reset (i.e. VFR, VFLR, host VF
255 * configuration change, etc).
256 *
257 * It brings the VSI down and then reconfigures it with the hardware.
258 */
259int ice_vf_reconfig_vsi(struct ice_vf *vf)
260{
261 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
262 struct ice_vsi_cfg_params params = {};
263 struct ice_pf *pf = vf->pf;
264 int err;
265
266 if (WARN_ON(!vsi))
267 return -EINVAL;
268
269 params = ice_vsi_to_params(vsi);
270 params.flags = ICE_VSI_FLAG_NO_INIT;
271
272 ice_vsi_decfg(vsi);
273 ice_fltr_remove_all(vsi);
274
275 err = ice_vsi_cfg(vsi, ¶ms);
276 if (err) {
277 dev_err(ice_pf_to_dev(pf),
278 "Failed to reconfigure the VF%u's VSI, error %d\n",
279 vf->vf_id, err);
280 return err;
281 }
282
283 /* Update the lan_vsi_num field since it might have been changed. The
284 * PF lan_vsi_idx number remains the same so we don't need to change
285 * that.
286 */
287 vf->lan_vsi_num = vsi->vsi_num;
288
289 return 0;
290}
291
292/**
293 * ice_vf_rebuild_vsi - rebuild the VF's VSI
294 * @vf: VF to rebuild the VSI for
295 *
296 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
297 * host, PFR, CORER, etc.).
298 *
299 * It reprograms the VSI configuration back into hardware.
300 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_pf *pf = vf->pf;

	/* the VSI must still exist while rebuilding all VFs */
	if (WARN_ON(!vsi))
		return -EINVAL;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
			vf->vf_id);
		return -EIO;
	}
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
	vf->lan_vsi_num = vsi->vsi_num;

	return 0;
}
322
323/**
324 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
325 * @vf: VF to add MAC filters for
326 * @vsi: Pointer to VSI
327 *
328 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
329 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
330 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* restore the port VLAN in the VSI properties first, then
		 * re-add the matching VLAN filter
		 */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* a failure to re-enable Rx VLAN filtering is logged but
	 * deliberately not treated as fatal for the rebuild
	 */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
364
365/**
366 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
367 * @vf: VF to re-apply the configuration for
368 *
369 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
370 * needs to re-apply the host configured Tx rate limiting configuration.
371 */
static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int err;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* rates are tracked in Mbps (see log messages); the BW-limit API
	 * takes the value scaled by 1000 (presumably Kbps). A rate of zero
	 * means no limit was configured, so nothing to restore.
	 */
	if (vf->min_tx_rate) {
		err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
				vf->min_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	if (vf->max_tx_rate) {
		err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
		if (err) {
			dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
				vf->max_tx_rate, vf->vf_id, err);
			return err;
		}
	}

	return 0;
}
401
402/**
403 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
404 * @vf: VF to configure trust setting for
405 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	/* mirror the host-configured trust flag into the capability bit */
	assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
}
410
411/**
412 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
413 * @vf: VF to add MAC filters for
414 *
415 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
416 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
417 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* in switchdev mode no filters are re-added here; presumably the
	 * eswitch handles them -- NOTE(review): confirm against eswitch code
	 */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	/* count the broadcast filter just added */
	vf->num_mac++;

	if (is_valid_ether_addr(vf->hw_lan_addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* after rebuild the device address tracks the HW address */
		ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
	}

	return 0;
}
457
458/**
459 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
460 * @vsi: Pointer to VSI
461 *
462 * This function moves VSI into corresponding scheduler aggregator node
463 * based on cached value of "aggregator node info" per VSI
464 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	/* nothing to do if no aggregator node was cached for this VSI */
	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	/* failure is only logged at debug level; the VSI simply stays in
	 * its current scheduler node
	 */
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}
490
491/**
492 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
493 * @vf: VF to rebuild host configuration on
494 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	/* each restore step below is attempted independently: a failure is
	 * logged but does not stop the remaining configuration from being
	 * reapplied
	 */
	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
524
525/**
526 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
527 * @vf: pointer to the VF structure
528 */
529static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
530{
531 /* Clear Rx/Tx enabled queues flag */
532 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
533 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
534 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
535}
536
537/**
538 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
539 * @vf: VF to set in initialized state
540 *
541 * After this function the VF will be ready to receive/handle the
542 * VIRTCHNL_OP_GET_VF_RESOURCES message
543 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
	/* all queues start out disabled after a reset */
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	/* set INIT last: it gates configuration (see ice_check_vf_init()) */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	/* VLAN v2 capabilities must be renegotiated after a reset */
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}
553
554/**
555 * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
556 * @vf: the VF being reset
557 *
558 * Perform reset tasks which must occur after the VSI has been re-created or
559 * rebuilt during a VF reset.
560 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
	/* restore host-administered settings (trust, MAC, VLAN, rates) */
	ice_vf_rebuild_host_cfg(vf);
	/* mark the VF ready for virtchnl communication again */
	ice_vf_set_initialized(vf);

	/* VF-implementation-specific post-rebuild work */
	vf->vf_ops->post_vsi_rebuild(vf);
}
568
569/**
570 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
571 * are in unicast promiscuous mode
572 * @pf: PF structure for accessing VF(s)
573 *
574 * Return false if no VF(s) are in unicast promiscuous mode,
575 * else return true
576 */
577bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
578{
579 bool is_vf_promisc = false;
580 struct ice_vf *vf;
581 unsigned int bkt;
582
583 rcu_read_lock();
584 ice_for_each_vf_rcu(pf, bkt, vf) {
585 /* found a VF that has promiscuous mode configured */
586 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
587 is_vf_promisc = true;
588 break;
589 }
590 }
591 rcu_read_unlock();
592
593 return is_vf_promisc;
594}
595
596/**
597 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
598 * @vf: the VF pointer
599 * @vsi: the VSI to configure
600 * @ucast_m: promiscuous mask to apply to unicast
601 * @mcast_m: promiscuous mask to apply to multicast
602 *
603 * Decide which mask should be used for unicast and multicast filter,
604 * based on presence of VLANs
605 */
606void
607ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
608 u8 *ucast_m, u8 *mcast_m)
609{
610 if (ice_vf_is_port_vlan_ena(vf) ||
611 ice_vsi_has_non_zero_vlans(vsi)) {
612 *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
613 *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
614 } else {
615 *mcast_m = ICE_MCAST_PROMISC_BITS;
616 *ucast_m = ICE_UCAST_PROMISC_BITS;
617 }
618}
619
620/**
621 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
622 * @vf: the VF pointer
623 * @vsi: the VSI to configure
624 *
625 * Clear all promiscuous/allmulticast filters for a VF
626 */
static int
ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vf->pf;
	u8 ucast_m, mcast_m;
	int ret = 0;

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
		/* without true promisc support, unicast promisc was emulated
		 * by making this VSI the default VSI, so undo that instead
		 */
		if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		} else {
			ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
		}

		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
		}
	}

	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
		if (ret) {
			dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
		} else {
			clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
			dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
		}
	}
	/* NOTE(review): ret reflects only the last clearing attempt; a
	 * unicast failure is masked if the multicast path runs afterwards
	 */
	return ret;
}
662
663/**
664 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
665 * @vf: the VF to configure
666 * @vsi: the VF's VSI
667 * @promisc_m: the promiscuous mode to enable
668 */
669int
670ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
671{
672 struct ice_hw *hw = &vsi->back->hw;
673 int status;
674
675 if (ice_vf_is_port_vlan_ena(vf))
676 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
677 ice_vf_get_port_vlan_id(vf));
678 else if (ice_vsi_has_non_zero_vlans(vsi))
679 status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
680 else
681 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
682
683 if (status && status != -EEXIST) {
684 dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
685 vf->vf_id, status);
686 return status;
687 }
688
689 return 0;
690}
691
692/**
693 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
694 * @vf: the VF to configure
695 * @vsi: the VF's VSI
696 * @promisc_m: the promiscuous mode to disable
697 */
698int
699ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
700{
701 struct ice_hw *hw = &vsi->back->hw;
702 int status;
703
704 if (ice_vf_is_port_vlan_ena(vf))
705 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
706 ice_vf_get_port_vlan_id(vf));
707 else if (ice_vsi_has_non_zero_vlans(vsi))
708 status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
709 else
710 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
711
712 if (status && status != -ENOENT) {
713 dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
714 vf->vf_id, status);
715 return status;
716 }
717
718 return 0;
719}
720
721/**
722 * ice_reset_all_vfs - reset all allocated VFs in one go
723 * @pf: pointer to the PF structure
724 *
725 * Reset all VFs at once, in response to a PF or other device reset.
726 *
727 * First, tell the hardware to reset each VF, then do all the waiting in one
728 * chunk, and finally finish restoring each VF after the wait. This is useful
729 * during PF routines which need to reset all VFs, as otherwise it must perform
730 * these resets in a serialized fashion.
731 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* the table lock protects against concurrent VF add/remove */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		ice_mbx_clear_malvf(&vf->mbx_info);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach(pf, vf);
		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		/* rebuild order matters: pre tasks, VSI, then post tasks */
		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		ice_vf_post_vsi_rebuild(vf);

		ice_eswitch_attach(pf, vf);

		mutex_unlock(&vf->cfg_lock);
	}

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
804
805/**
806 * ice_notify_vf_reset - Notify VF of a reset event
807 * @vf: pointer to the VF structure
808 */
809static void ice_notify_vf_reset(struct ice_vf *vf)
810{
811 struct ice_hw *hw = &vf->pf->hw;
812 struct virtchnl_pf_event pfe;
813
814 /* Bail out if VF is in disabled state, neither initialized, nor active
815 * state - otherwise proceed with notifications
816 */
817 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
818 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
819 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
820 return;
821
822 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
823 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
824 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
825 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
826 NULL);
827}
828
829/**
830 * ice_reset_vf - Reset a particular VF
831 * @vf: pointer to the VF structure
832 * @flags: flags controlling behavior of the reset
833 *
834 * Flags:
835 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
836 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
837 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
838 *
839 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
840 * the VF resets successfully. Returns an error code if the VF fails to
841 * rebuild.
842 */
843int ice_reset_vf(struct ice_vf *vf, u32 flags)
844{
845 struct ice_pf *pf = vf->pf;
846 struct ice_lag *lag;
847 struct ice_vsi *vsi;
848 u8 act_prt, pri_prt;
849 struct device *dev;
850 int err = 0;
851 bool rsd;
852
853 dev = ice_pf_to_dev(pf);
854 act_prt = ICE_LAG_INVALID_PORT;
855 pri_prt = pf->hw.port_info->lport;
856
857 if (flags & ICE_VF_RESET_NOTIFY)
858 ice_notify_vf_reset(vf);
859
860 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
861 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
862 vf->vf_id);
863 return 0;
864 }
865
866 lag = pf->lag;
867 mutex_lock(&pf->lag_mutex);
868 if (lag && lag->bonded && lag->primary) {
869 act_prt = lag->active_port;
870 if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
871 lag->upper_netdev)
872 ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
873 else
874 act_prt = ICE_LAG_INVALID_PORT;
875 }
876
877 if (flags & ICE_VF_RESET_LOCK)
878 mutex_lock(&vf->cfg_lock);
879 else
880 lockdep_assert_held(&vf->cfg_lock);
881
882 if (ice_is_vf_disabled(vf)) {
883 vsi = ice_get_vf_vsi(vf);
884 if (!vsi) {
885 dev_dbg(dev, "VF is already removed\n");
886 err = -EINVAL;
887 goto out_unlock;
888 }
889 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
890
891 if (ice_vsi_is_rx_queue_active(vsi))
892 ice_vsi_stop_all_rx_rings(vsi);
893
894 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
895 vf->vf_id);
896 goto out_unlock;
897 }
898
899 /* Set VF disable bit state here, before triggering reset */
900 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
901 ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
902
903 vsi = ice_get_vf_vsi(vf);
904 if (WARN_ON(!vsi)) {
905 err = -EIO;
906 goto out_unlock;
907 }
908
909 ice_dis_vf_qs(vf);
910
911 /* Call Disable LAN Tx queue AQ whether or not queues are
912 * enabled. This is needed for successful completion of VFR.
913 */
914 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
915 NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
916
917 /* poll VPGEN_VFRSTAT reg to make sure
918 * that reset is complete
919 */
920 rsd = vf->vf_ops->poll_reset_status(vf);
921
922 /* Display a warning if VF didn't manage to reset in time, but need to
923 * continue on with the operation.
924 */
925 if (!rsd)
926 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
927
928 vf->driver_caps = 0;
929 ice_vc_set_default_allowlist(vf);
930
931 /* disable promiscuous modes in case they were enabled
932 * ignore any error if disabling process failed
933 */
934 ice_vf_clear_all_promisc_modes(vf, vsi);
935
936 ice_vf_fdir_exit(vf);
937 ice_vf_fdir_init(vf);
938 /* clean VF control VSI when resetting VF since it should be setup
939 * only when VF creates its first FDIR rule.
940 */
941 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
942 ice_vf_ctrl_vsi_release(vf);
943
944 ice_vf_pre_vsi_rebuild(vf);
945
946 if (ice_vf_reconfig_vsi(vf)) {
947 dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
948 vf->vf_id);
949 err = -EFAULT;
950 goto out_unlock;
951 }
952
953 ice_vf_post_vsi_rebuild(vf);
954 vsi = ice_get_vf_vsi(vf);
955 if (WARN_ON(!vsi)) {
956 err = -EINVAL;
957 goto out_unlock;
958 }
959
960 ice_eswitch_update_repr(vf->repr_id, vsi);
961
962 /* if the VF has been reset allow it to come up again */
963 ice_mbx_clear_malvf(&vf->mbx_info);
964
965out_unlock:
966 if (flags & ICE_VF_RESET_LOCK)
967 mutex_unlock(&vf->cfg_lock);
968
969 if (lag && lag->bonded && lag->primary &&
970 act_prt != ICE_LAG_INVALID_PORT)
971 ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
972 mutex_unlock(&pf->lag_mutex);
973
974 return err;
975}
976
977/**
978 * ice_set_vf_state_dis - Set VF state to disabled
979 * @vf: pointer to the VF structure
980 */
981void ice_set_vf_state_dis(struct ice_vf *vf)
982{
983 ice_set_vf_state_qs_dis(vf);
984 vf->vf_ops->clear_reset_state(vf);
985}
986
987/* Private functions only accessed from other virtualization files */
988
989/**
990 * ice_initialize_vf_entry - Initialize a VF entry
991 * @vf: pointer to the VF structure
992 */
993void ice_initialize_vf_entry(struct ice_vf *vf)
994{
995 struct ice_pf *pf = vf->pf;
996 struct ice_vfs *vfs;
997
998 vfs = &pf->vfs;
999
1000 /* assign default capabilities */
1001 vf->spoofchk = true;
1002 vf->num_vf_qs = vfs->num_qps_per;
1003 ice_vc_set_default_allowlist(vf);
1004 ice_virtchnl_set_dflt_ops(vf);
1005
1006 /* ctrl_vsi_idx will be set to a valid value only when iAVF
1007 * creates its first fdir rule.
1008 */
1009 ice_vf_ctrl_invalidate_vsi(vf);
1010 ice_vf_fdir_init(vf);
1011
1012 /* Initialize mailbox info for this VF */
1013 ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
1014
1015 mutex_init(&vf->cfg_lock);
1016}
1017
1018/**
1019 * ice_dis_vf_qs - Disable the VF queues
1020 * @vf: pointer to the VF structure
1021 */
1022void ice_dis_vf_qs(struct ice_vf *vf)
1023{
1024 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1025
1026 if (WARN_ON(!vsi))
1027 return;
1028
1029 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
1030 ice_vsi_stop_all_rx_rings(vsi);
1031 ice_set_vf_state_qs_dis(vf);
1032}
1033
1034/**
1035 * ice_err_to_virt_err - translate errors for VF return code
1036 * @err: error return code
1037 */
1038enum virtchnl_status_code ice_err_to_virt_err(int err)
1039{
1040 switch (err) {
1041 case 0:
1042 return VIRTCHNL_STATUS_SUCCESS;
1043 case -EINVAL:
1044 case -ENODEV:
1045 return VIRTCHNL_STATUS_ERR_PARAM;
1046 case -ENOMEM:
1047 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
1048 case -EALREADY:
1049 case -EBUSY:
1050 case -EIO:
1051 case -ENOSPC:
1052 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1053 default:
1054 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
1055 }
1056}
1057
1058/**
1059 * ice_check_vf_init - helper to check if VF init complete
1060 * @vf: the pointer to the VF to check
1061 */
1062int ice_check_vf_init(struct ice_vf *vf)
1063{
1064 struct ice_pf *pf = vf->pf;
1065
1066 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1067 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
1068 vf->vf_id);
1069 return -EBUSY;
1070 }
1071 return 0;
1072}
1073
1074/**
1075 * ice_vf_get_port_info - Get the VF's port info structure
1076 * @vf: VF used to get the port info structure for
1077 */
1078struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
1079{
1080 return vf->pf->hw.port_info;
1081}
1082
1083/**
1084 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
1085 * @vsi: the VSI to configure
1086 * @enable: whether to enable or disable the spoof checking
1087 *
1088 * Configure a VSI to enable (or disable) spoof checking behavior.
1089 */
1090static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
1091{
1092 struct ice_vsi_ctx *ctx;
1093 int err;
1094
1095 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1096 if (!ctx)
1097 return -ENOMEM;
1098
1099 ctx->info.sec_flags = vsi->info.sec_flags;
1100 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1101
1102 if (enable)
1103 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1104 else
1105 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1106
1107 err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
1108 if (err)
1109 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
1110 enable ? "ON" : "OFF", vsi->vsi_num, err);
1111 else
1112 vsi->info.sec_flags = ctx->info.sec_flags;
1113
1114 kfree(ctx);
1115
1116 return err;
1117}
1118
1119/**
1120 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
1121 * @vsi: VSI to enable Tx spoof checking for
1122 */
1123static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
1124{
1125 struct ice_vsi_vlan_ops *vlan_ops;
1126 int err = 0;
1127
1128 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1129
1130 /* Allow VF with VLAN 0 only to send all tagged traffic */
1131 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
1132 err = vlan_ops->ena_tx_filtering(vsi);
1133 if (err)
1134 return err;
1135 }
1136
1137 return ice_cfg_mac_antispoof(vsi, true);
1138}
1139
1140/**
1141 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
1142 * @vsi: VSI to disable Tx spoof checking for
1143 */
1144static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
1145{
1146 struct ice_vsi_vlan_ops *vlan_ops;
1147 int err;
1148
1149 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1150
1151 err = vlan_ops->dis_tx_filtering(vsi);
1152 if (err)
1153 return err;
1154
1155 return ice_cfg_mac_antispoof(vsi, false);
1156}
1157
1158/**
1159 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
1160 * @vsi: VSI associated to the VF
1161 * @enable: whether to enable or disable the spoof checking
1162 */
1163int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
1164{
1165 int err;
1166
1167 if (enable)
1168 err = ice_vsi_ena_spoofchk(vsi);
1169 else
1170 err = ice_vsi_dis_spoofchk(vsi);
1171
1172 return err;
1173}
1174
1175/**
1176 * ice_is_vf_trusted
1177 * @vf: pointer to the VF info
1178 */
1179bool ice_is_vf_trusted(struct ice_vf *vf)
1180{
1181 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1182}
1183
1184/**
1185 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
1186 * @vf: the VF to check
1187 *
1188 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
1189 * otherwise
1190 */
1191bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
1192{
1193 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
1194 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
1195}
1196
1197/**
1198 * ice_is_vf_link_up - check if the VF's link is up
1199 * @vf: VF to check if link is up
1200 */
1201bool ice_is_vf_link_up(struct ice_vf *vf)
1202{
1203 struct ice_port_info *pi = ice_vf_get_port_info(vf);
1204
1205 if (ice_check_vf_init(vf))
1206 return false;
1207
1208 if (ice_vf_has_no_qs_ena(vf))
1209 return false;
1210 else if (vf->link_forced)
1211 return vf->link_up;
1212 else
1213 return pi->phy.link_info.link_info &
1214 ICE_AQ_LINK_UP;
1215}
1216
1217/**
1218 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1219 * @vf: VF that control VSI is being invalidated on
1220 */
1221void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
1222{
1223 vf->ctrl_vsi_idx = ICE_NO_VSI;
1224}
1225
1226/**
1227 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1228 * @vf: VF that control VSI is being released on
1229 */
1230void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1231{
1232 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1233 ice_vf_ctrl_invalidate_vsi(vf);
1234}
1235
1236/**
1237 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1238 * @vf: VF to setup control VSI for
1239 *
1240 * Returns pointer to the successfully allocated VSI struct on success,
1241 * otherwise returns NULL on failure.
1242 */
1243struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
1244{
1245 struct ice_vsi_cfg_params params = {};
1246 struct ice_pf *pf = vf->pf;
1247 struct ice_vsi *vsi;
1248
1249 params.type = ICE_VSI_CTRL;
1250 params.pi = ice_vf_get_port_info(vf);
1251 params.vf = vf;
1252 params.flags = ICE_VSI_FLAG_INIT;
1253
1254 vsi = ice_vsi_setup(pf, ¶ms);
1255 if (!vsi) {
1256 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
1257 ice_vf_ctrl_invalidate_vsi(vf);
1258 }
1259
1260 return vsi;
1261}
1262
1263/**
1264 * ice_vf_init_host_cfg - Initialize host admin configuration
1265 * @vf: VF to initialize
1266 * @vsi: the VSI created at initialization
1267 *
1268 * Initialize the VF host configuration. Called during VF creation to setup
1269 * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
1270 * should only be called during VF creation.
1271 */
1272int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
1273{
1274 struct ice_vsi_vlan_ops *vlan_ops;
1275 struct ice_pf *pf = vf->pf;
1276 u8 broadcast[ETH_ALEN];
1277 struct device *dev;
1278 int err;
1279
1280 dev = ice_pf_to_dev(pf);
1281
1282 err = ice_vsi_add_vlan_zero(vsi);
1283 if (err) {
1284 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1285 vf->vf_id);
1286 return err;
1287 }
1288
1289 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
1290 err = vlan_ops->ena_rx_filtering(vsi);
1291 if (err) {
1292 dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
1293 vf->vf_id);
1294 return err;
1295 }
1296
1297 eth_broadcast_addr(broadcast);
1298 err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1299 if (err) {
1300 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
1301 vf->vf_id, err);
1302 return err;
1303 }
1304
1305 vf->num_mac = 1;
1306
1307 err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
1308 if (err) {
1309 dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
1310 vf->vf_id);
1311 return err;
1312 }
1313
1314 return 0;
1315}
1316
1317/**
1318 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
1319 * @vf: VF to remove access to VSI for
1320 */
1321void ice_vf_invalidate_vsi(struct ice_vf *vf)
1322{
1323 vf->lan_vsi_idx = ICE_NO_VSI;
1324 vf->lan_vsi_num = ICE_NO_VSI;
1325}
1326
1327/**
1328 * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
1329 * @vf: pointer to the VF structure
1330 *
1331 * Release the VF associated with this VSI and then invalidate the VSI
1332 * indexes.
1333 */
1334void ice_vf_vsi_release(struct ice_vf *vf)
1335{
1336 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1337
1338 if (WARN_ON(!vsi))
1339 return;
1340
1341 ice_vsi_release(vsi);
1342 ice_vf_invalidate_vsi(vf);
1343}
1344
1345/**
1346 * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
1347 * @pf: the PF private structure
1348 * @vsi: pointer to the VSI
1349 *
1350 * Return first found VF control VSI other than the vsi
1351 * passed by parameter. This function is used to determine
1352 * whether new resources have to be allocated for control VSI
1353 * or they can be shared with existing one.
1354 *
1355 * Return found VF control VSI pointer other itself. Return
1356 * NULL Otherwise.
1357 *
1358 */
1359struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
1360{
1361 struct ice_vsi *ctrl_vsi = NULL;
1362 struct ice_vf *vf;
1363 unsigned int bkt;
1364
1365 rcu_read_lock();
1366 ice_for_each_vf_rcu(pf, bkt, vf) {
1367 if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
1368 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
1369 break;
1370 }
1371 }
1372
1373 rcu_read_unlock();
1374 return ctrl_vsi;
1375}