Loading...
Note: File does not exist in v4.17.
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2022, Intel Corporation. */
3
4#include "ice_vf_lib_private.h"
5#include "ice.h"
6#include "ice_lib.h"
7#include "ice_fltr.h"
8#include "ice_virtchnl_allowlist.h"
9
10/* Public functions which may be accessed by all driver files */
11
12/**
13 * ice_get_vf_by_id - Get pointer to VF by ID
14 * @pf: the PF private structure
15 * @vf_id: the VF ID to locate
16 *
17 * Locate and return a pointer to the VF structure associated with a given ID.
18 * Returns NULL if the ID does not have a valid VF structure associated with
19 * it.
20 *
21 * This function takes a reference to the VF, which must be released by
22 * calling ice_put_vf() once the caller is finished accessing the VF structure
23 * returned.
24 */
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;

	/* The RCU read lock keeps the table entry from being freed while we
	 * attempt to take a reference to it.
	 */
	rcu_read_lock();
	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
		if (vf->vf_id == vf_id) {
			struct ice_vf *found;

			/* kref_get_unless_zero() fails if the VF is already
			 * being torn down; treat that as "not found" rather
			 * than handing out a reference to a dying VF.
			 */
			if (kref_get_unless_zero(&vf->refcnt))
				found = vf;
			else
				found = NULL;

			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();

	return NULL;
}
47
48/**
49 * ice_release_vf - Release VF associated with a refcount
50 * @ref: the kref decremented to zero
51 *
52 * Callback function for kref_put to release a VF once its reference count has
53 * hit zero.
54 */
static void ice_release_vf(struct kref *ref)
{
	struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);

	/* Defer actual teardown to the VF implementation (SR-IOV, etc.) */
	vf->vf_ops->free(vf);
}
61
62/**
63 * ice_put_vf - Release a reference to a VF
64 * @vf: the VF structure to decrease reference count on
65 *
66 * Decrease the reference count for a VF, and free the entry if it is no
67 * longer in use.
68 *
69 * This must be called after ice_get_vf_by_id() once the reference to the VF
70 * structure is no longer used. Otherwise, the VF structure will never be
71 * freed.
72 */
void ice_put_vf(struct ice_vf *vf)
{
	/* ice_release_vf() frees the VF once the last reference is dropped */
	kref_put(&vf->refcnt, ice_release_vf);
}
77
78/**
79 * ice_has_vfs - Return true if the PF has any associated VFs
80 * @pf: the PF private structure
81 *
82 * Return whether or not the PF has any allocated VFs.
83 *
84 * Note that this function only guarantees that there are no VFs at the point
85 * of calling it. It does not guarantee that no more VFs will be added.
86 */
bool ice_has_vfs(struct ice_pf *pf)
{
	/* A simple check that the hash table is not empty does not require
	 * the mutex or rcu_read_lock. Note this is inherently racy against
	 * concurrent VF creation/removal, as documented above.
	 */
	return !hash_empty(pf->vfs.table);
}
94
95/**
96 * ice_get_num_vfs - Get number of allocated VFs
97 * @pf: the PF private structure
98 *
99 * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
100 * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
101 * the output of this function.
102 */
103u16 ice_get_num_vfs(struct ice_pf *pf)
104{
105 struct ice_vf *vf;
106 unsigned int bkt;
107 u16 num_vfs = 0;
108
109 rcu_read_lock();
110 ice_for_each_vf_rcu(pf, bkt, vf)
111 num_vfs++;
112 rcu_read_unlock();
113
114 return num_vfs;
115}
116
117/**
118 * ice_get_vf_vsi - get VF's VSI based on the stored index
119 * @vf: VF used to get VSI
120 */
121struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
122{
123 if (vf->lan_vsi_idx == ICE_NO_VSI)
124 return NULL;
125
126 return vf->pf->vsi[vf->lan_vsi_idx];
127}
128
129/**
130 * ice_is_vf_disabled
131 * @vf: pointer to the VF info
132 *
133 * If the PF has been disabled, there is no need resetting VF until PF is
134 * active again. Similarly, if the VF has been disabled, this means something
135 * else is resetting the VF, so we shouldn't continue.
136 *
137 * Returns true if the caller should consider the VF as disabled whether
138 * because that single VF is explicitly disabled or because the PF is
139 * currently disabled.
140 */
141bool ice_is_vf_disabled(struct ice_vf *vf)
142{
143 struct ice_pf *pf = vf->pf;
144
145 return (test_bit(ICE_VF_DIS, pf->state) ||
146 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
147}
148
149/**
150 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
151 * @vf: The VF being resseting
152 *
153 * The max poll time is about ~800ms, which is about the maximum time it takes
154 * for a VF to be reset and/or a VF driver to be removed.
155 */
156static void ice_wait_on_vf_reset(struct ice_vf *vf)
157{
158 int i;
159
160 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
161 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
162 break;
163 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
164 }
165}
166
167/**
168 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
169 * @vf: VF to check if it's ready to be configured/queried
170 *
171 * The purpose of this function is to make sure the VF is not in reset, not
172 * disabled, and initialized so it can be configured and/or queried by a host
173 * administrator.
174 */
175int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
176{
177 ice_wait_on_vf_reset(vf);
178
179 if (ice_is_vf_disabled(vf))
180 return -EINVAL;
181
182 if (ice_check_vf_init(vf))
183 return -EBUSY;
184
185 return 0;
186}
187
188/**
189 * ice_trigger_vf_reset - Reset a VF on HW
190 * @vf: pointer to the VF structure
191 * @is_vflr: true if VFLR was issued, false if not
192 * @is_pfr: true if the reset was triggered due to a previous PFR
193 *
194 * Trigger hardware to start a reset for a particular VF. Expects the caller
195 * to wait the proper amount of time to allow hardware to reset the VF before
196 * it cleans up and restores VF functionality.
197 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
	 * needs to clear them in the case of VFR/VFLR. If this is done for
	 * PFR, it can mess up VF resets because the VF driver may already
	 * have started cleanup by the time we get here.
	 */
	if (!is_pfr)
		vf->vf_ops->clear_mbx_register(vf);

	/* Kick off the actual HW reset via the implementation-specific op */
	vf->vf_ops->trigger_reset_register(vf, is_vflr);
}
218
/**
 * ice_vf_clear_counters - reset all per-VF tracking counters
 * @vf: VF whose counters are being cleared
 *
 * Zero the VF's MAC/VLAN filter counts and its MDD event statistics. Called
 * as part of VF reset so the rebuilt VF starts with a clean slate.
 */
static void ice_vf_clear_counters(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	/* VSI may already be gone if the VF is mid-teardown */
	if (vsi)
		vsi->num_vlan = 0;

	vf->num_mac = 0;
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
230
231/**
232 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
233 * @vf: VF to perform pre VSI rebuild tasks
234 *
235 * These tasks are items that don't need to be amortized since they are most
236 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
237 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	/* Zero counters first, then acknowledge the reset trigger so the
	 * subsequent VSI rebuild starts from a known-clean state.
	 */
	ice_vf_clear_counters(vf);
	vf->vf_ops->clear_reset_trigger(vf);
}
243
244/**
245 * ice_vf_rebuild_vsi - rebuild the VF's VSI
246 * @vf: VF to rebuild the VSI for
247 *
248 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
249 * host, PFR, CORER, etc.).
250 */
251static int ice_vf_rebuild_vsi(struct ice_vf *vf)
252{
253 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
254 struct ice_pf *pf = vf->pf;
255
256 if (WARN_ON(!vsi))
257 return -EINVAL;
258
259 if (ice_vsi_rebuild(vsi, true)) {
260 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
261 vf->vf_id);
262 return -EIO;
263 }
264 /* vsi->idx will remain the same in this case so don't update
265 * vf->lan_vsi_idx
266 */
267 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
268 vf->lan_vsi_num = vsi->vsi_num;
269
270 return 0;
271}
272
273/**
274 * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
275 * are in unicast promiscuous mode
276 * @pf: PF structure for accessing VF(s)
277 *
278 * Return false if no VF(s) are in unicast promiscuous mode,
279 * else return true
280 */
281bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
282{
283 bool is_vf_promisc = false;
284 struct ice_vf *vf;
285 unsigned int bkt;
286
287 rcu_read_lock();
288 ice_for_each_vf_rcu(pf, bkt, vf) {
289 /* found a VF that has promiscuous mode configured */
290 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
291 is_vf_promisc = true;
292 break;
293 }
294 }
295 rcu_read_unlock();
296
297 return is_vf_promisc;
298}
299
300/**
301 * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
302 * @vf: the VF pointer
303 * @vsi: the VSI to configure
304 * @ucast_m: promiscuous mask to apply to unicast
305 * @mcast_m: promiscuous mask to apply to multicast
306 *
307 * Decide which mask should be used for unicast and multicast filter,
308 * based on presence of VLANs
309 */
310void
311ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
312 u8 *ucast_m, u8 *mcast_m)
313{
314 if (ice_vf_is_port_vlan_ena(vf) ||
315 ice_vsi_has_non_zero_vlans(vsi)) {
316 *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
317 *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
318 } else {
319 *mcast_m = ICE_MCAST_PROMISC_BITS;
320 *ucast_m = ICE_UCAST_PROMISC_BITS;
321 }
322}
323
324/**
325 * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
326 * @vf: the VF pointer
327 * @vsi: the VSI to configure
328 *
329 * Clear all promiscuous/allmulticast filters for a VF
330 */
331static int
332ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
333{
334 struct ice_pf *pf = vf->pf;
335 u8 ucast_m, mcast_m;
336 int ret = 0;
337
338 ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
339 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
340 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
341 if (ice_is_dflt_vsi_in_use(vsi->port_info))
342 ret = ice_clear_dflt_vsi(vsi);
343 } else {
344 ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
345 }
346
347 if (ret) {
348 dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
349 } else {
350 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
351 dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
352 }
353 }
354
355 if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
356 ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
357 if (ret) {
358 dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
359 } else {
360 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
361 dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
362 }
363 }
364 return ret;
365}
366
367/**
368 * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
369 * @vf: the VF to configure
370 * @vsi: the VF's VSI
371 * @promisc_m: the promiscuous mode to enable
372 */
373int
374ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
375{
376 struct ice_hw *hw = &vsi->back->hw;
377 int status;
378
379 if (ice_vf_is_port_vlan_ena(vf))
380 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
381 ice_vf_get_port_vlan_id(vf));
382 else if (ice_vsi_has_non_zero_vlans(vsi))
383 status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
384 else
385 status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
386
387 if (status && status != -EEXIST) {
388 dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
389 vf->vf_id, status);
390 return status;
391 }
392
393 return 0;
394}
395
396/**
397 * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
398 * @vf: the VF to configure
399 * @vsi: the VF's VSI
400 * @promisc_m: the promiscuous mode to disable
401 */
402int
403ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
404{
405 struct ice_hw *hw = &vsi->back->hw;
406 int status;
407
408 if (ice_vf_is_port_vlan_ena(vf))
409 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
410 ice_vf_get_port_vlan_id(vf));
411 else if (ice_vsi_has_non_zero_vlans(vsi))
412 status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
413 else
414 status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
415
416 if (status && status != -ENOENT) {
417 dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
418 vf->vf_id, status);
419 return status;
420 }
421
422 return 0;
423}
424
425/**
426 * ice_reset_all_vfs - reset all allocated VFs in one go
427 * @pf: pointer to the PF structure
428 *
429 * Reset all VFs at once, in response to a PF or other device reset.
430 *
431 * First, tell the hardware to reset each VF, then do all the waiting in one
432 * chunk, and finally finish restoring each VF after the wait. This is useful
433 * during PF routines which need to reset all VFs, as otherwise it must perform
434 * these resets in a serialized fashion.
435 */
void ice_reset_all_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!ice_has_vfs(pf))
		return;

	/* table_lock prevents VFs from being added or removed while the
	 * hash table is walked below.
	 */
	mutex_lock(&pf->vfs.table_lock);

	/* clear all malicious info if the VFs are getting reset */
	ice_for_each_vf(pf, bkt, vf)
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
					ICE_MAX_SRIOV_VFS, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
		mutex_unlock(&pf->vfs.table_lock);
		return;
	}

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, bkt, vf)
		ice_trigger_vf_reset(vf, true, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Now that we've triggered all of the VFs, iterate
	 * the table again and wait for each VF to complete.
	 */
	ice_for_each_vf(pf, bkt, vf) {
		if (!vf->vf_ops->poll_reset_status(vf)) {
			/* Display a warning if at least one VF didn't manage
			 * to reset in time, but continue on with the
			 * operation.
			 */
			dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
			break;
		}
	}

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, bkt, vf) {
		/* per-VF cfg_lock serializes against virtchnl handling */
		mutex_lock(&vf->cfg_lock);

		vf->driver_caps = 0;
		ice_vc_set_default_allowlist(vf);

		ice_vf_fdir_exit(vf);
		ice_vf_fdir_init(vf);
		/* clean VF control VSI when resetting VFs since it should be
		 * setup only when VF creates its first FDIR rule.
		 */
		if (vf->ctrl_vsi_idx != ICE_NO_VSI)
			ice_vf_ctrl_invalidate_vsi(vf);

		ice_vf_pre_vsi_rebuild(vf);
		ice_vf_rebuild_vsi(vf);
		vf->vf_ops->post_vsi_rebuild(vf);

		mutex_unlock(&vf->cfg_lock);
	}

	/* NOTE(review): an eswitch rebuild failure is only warned about;
	 * the reset completes regardless.
	 */
	if (ice_is_eswitch_mode_switchdev(pf))
		if (ice_eswitch_rebuild(pf))
			dev_warn(dev, "eswitch rebuild failed\n");

	ice_flush(hw);
	clear_bit(ICE_VF_DIS, pf->state);

	mutex_unlock(&pf->vfs.table_lock);
}
512
513/**
514 * ice_notify_vf_reset - Notify VF of a reset event
515 * @vf: pointer to the VF structure
516 */
517static void ice_notify_vf_reset(struct ice_vf *vf)
518{
519 struct ice_hw *hw = &vf->pf->hw;
520 struct virtchnl_pf_event pfe;
521
522 /* Bail out if VF is in disabled state, neither initialized, nor active
523 * state - otherwise proceed with notifications
524 */
525 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
526 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
527 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
528 return;
529
530 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
531 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
532 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
533 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
534 NULL);
535}
536
537/**
538 * ice_reset_vf - Reset a particular VF
539 * @vf: pointer to the VF structure
540 * @flags: flags controlling behavior of the reset
541 *
542 * Flags:
543 * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
544 * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
545 * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
546 *
547 * Returns 0 if the VF is currently in reset, if resets are disabled, or if
548 * the VF resets successfully. Returns an error code if the VF fails to
549 * rebuild.
550 */
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	int err = 0;
	bool rsd;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;

	if (flags & ICE_VF_RESET_NOTIFY)
		ice_notify_vf_reset(vf);

	if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return 0;
	}

	/* A disabled VF is not reset again; just make sure its queues are
	 * quiesced before reporting success.
	 */
	if (ice_is_vf_disabled(vf)) {
		vsi = ice_get_vf_vsi(vf);
		if (!vsi) {
			dev_dbg(dev, "VF is already removed\n");
			return -EINVAL;
		}
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);

		if (ice_vsi_is_rx_queue_active(vsi))
			ice_vsi_stop_all_rx_rings(vsi);

		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return 0;
	}

	/* Without ICE_VF_RESET_LOCK the caller must already hold cfg_lock */
	if (flags & ICE_VF_RESET_LOCK)
		mutex_lock(&vf->cfg_lock);
	else
		lockdep_assert_held(&vf->cfg_lock);

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);

	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EIO;
		goto out_unlock;
	}

	ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	rsd = vf->vf_ops->poll_reset_status(vf);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	vf->driver_caps = 0;
	ice_vc_set_default_allowlist(vf);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	ice_vf_clear_all_promisc_modes(vf, vsi);

	ice_eswitch_del_vf_mac_rule(vf);

	ice_vf_fdir_exit(vf);
	ice_vf_fdir_init(vf);
	/* clean VF control VSI when resetting VF since it should be setup
	 * only when VF creates its first FDIR rule.
	 */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	ice_vf_pre_vsi_rebuild(vf);

	if (vf->vf_ops->vsi_rebuild(vf)) {
		dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
			vf->vf_id);
		err = -EFAULT;
		goto out_unlock;
	}

	vf->vf_ops->post_vsi_rebuild(vf);
	/* re-read the VSI pointer: vsi_rebuild() may have replaced it */
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi)) {
		err = -EINVAL;
		goto out_unlock;
	}

	ice_eswitch_update_repr(vsi);
	ice_eswitch_replay_vf_mac_rule(vf);

	/* if the VF has been reset allow it to come up again */
	if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
				ICE_MAX_SRIOV_VFS, vf->vf_id))
		dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
			vf->vf_id);

out_unlock:
	if (flags & ICE_VF_RESET_LOCK)
		mutex_unlock(&vf->cfg_lock);

	return err;
}
671
672/**
673 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
674 * @vf: pointer to the VF structure
675 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	/* ...and the aggregate "any queues enabled" state bit */
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
683
684/* Private functions only accessed from other virtualization files */
685
686/**
687 * ice_dis_vf_qs - Disable the VF queues
688 * @vf: pointer to the VF structure
689 */
void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	/* Stop HW rings first, then clear the software queue-enable state */
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}
701
702/**
703 * ice_err_to_virt_err - translate errors for VF return code
704 * @err: error return code
705 */
706enum virtchnl_status_code ice_err_to_virt_err(int err)
707{
708 switch (err) {
709 case 0:
710 return VIRTCHNL_STATUS_SUCCESS;
711 case -EINVAL:
712 case -ENODEV:
713 return VIRTCHNL_STATUS_ERR_PARAM;
714 case -ENOMEM:
715 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
716 case -EALREADY:
717 case -EBUSY:
718 case -EIO:
719 case -ENOSPC:
720 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
721 default:
722 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
723 }
724}
725
726/**
727 * ice_check_vf_init - helper to check if VF init complete
728 * @vf: the pointer to the VF to check
729 */
730int ice_check_vf_init(struct ice_vf *vf)
731{
732 struct ice_pf *pf = vf->pf;
733
734 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
735 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
736 vf->vf_id);
737 return -EBUSY;
738 }
739 return 0;
740}
741
742/**
743 * ice_vf_get_port_info - Get the VF's port info structure
744 * @vf: VF used to get the port info structure for
745 */
struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	/* All VFs share the PF's port info */
	return vf->pf->hw.port_info;
}
750
751/**
752 * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
753 * @vsi: the VSI to configure
754 * @enable: whether to enable or disable the spoof checking
755 *
756 * Configure a VSI to enable (or disable) spoof checking behavior.
757 */
758static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
759{
760 struct ice_vsi_ctx *ctx;
761 int err;
762
763 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
764 if (!ctx)
765 return -ENOMEM;
766
767 ctx->info.sec_flags = vsi->info.sec_flags;
768 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
769
770 if (enable)
771 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
772 else
773 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
774
775 err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
776 if (err)
777 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
778 enable ? "ON" : "OFF", vsi->vsi_num, err);
779 else
780 vsi->info.sec_flags = ctx->info.sec_flags;
781
782 kfree(ctx);
783
784 return err;
785}
786
787/**
788 * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
789 * @vsi: VSI to enable Tx spoof checking for
790 */
791static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
792{
793 struct ice_vsi_vlan_ops *vlan_ops;
794 int err = 0;
795
796 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
797
798 /* Allow VF with VLAN 0 only to send all tagged traffic */
799 if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
800 err = vlan_ops->ena_tx_filtering(vsi);
801 if (err)
802 return err;
803 }
804
805 return ice_cfg_mac_antispoof(vsi, true);
806}
807
808/**
809 * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
810 * @vsi: VSI to disable Tx spoof checking for
811 */
812static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
813{
814 struct ice_vsi_vlan_ops *vlan_ops;
815 int err;
816
817 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
818
819 err = vlan_ops->dis_tx_filtering(vsi);
820 if (err)
821 return err;
822
823 return ice_cfg_mac_antispoof(vsi, false);
824}
825
826/**
827 * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
828 * @vsi: VSI associated to the VF
829 * @enable: whether to enable or disable the spoof checking
830 */
831int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
832{
833 int err;
834
835 if (enable)
836 err = ice_vsi_ena_spoofchk(vsi);
837 else
838 err = ice_vsi_dis_spoofchk(vsi);
839
840 return err;
841}
842
843/**
844 * ice_is_vf_trusted
845 * @vf: pointer to the VF info
846 */
bool ice_is_vf_trusted(struct ice_vf *vf)
{
	/* trust == the privilege capability bit set on this VF */
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
851
852/**
853 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
854 * @vf: the VF to check
855 *
856 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
857 * otherwise
858 */
859bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
860{
861 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
862 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
863}
864
865/**
866 * ice_is_vf_link_up - check if the VF's link is up
867 * @vf: VF to check if link is up
868 */
869bool ice_is_vf_link_up(struct ice_vf *vf)
870{
871 struct ice_port_info *pi = ice_vf_get_port_info(vf);
872
873 if (ice_check_vf_init(vf))
874 return false;
875
876 if (ice_vf_has_no_qs_ena(vf))
877 return false;
878 else if (vf->link_forced)
879 return vf->link_up;
880 else
881 return pi->phy.link_info.link_info &
882 ICE_AQ_LINK_UP;
883}
884
885/**
886 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
887 * @vf: VF to configure trust setting for
888 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	/* Mirror the host-configured trust flag into the capability bit */
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
896
897/**
898 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
899 * @vf: VF to add MAC filters for
900 *
901 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
902 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
903 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u8 broadcast[ETH_ALEN];
	int status;

	if (WARN_ON(!vsi))
		return -EINVAL;

	/* in switchdev mode MAC filters are handled by the eswitch path */
	if (ice_is_eswitch_mode_switchdev(vf->pf))
		return 0;

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
			vf->vf_id, status);
		return status;
	}

	vf->num_mac++;

	/* only re-add a unicast filter if the VF has a valid hardware MAC */
	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
				&vf->hw_lan_addr.addr[0], vf->vf_id,
				status);
			return status;
		}
		vf->num_mac++;

		/* the device address tracks the hardware address after reset */
		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}

	return 0;
}
943
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 * @vsi: Pointer to VSI
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vf->pf);
	int err;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* restore the port VLAN in the VSI config, then its filter */
		err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
	} else {
		err = ice_vsi_add_vlan_zero(vsi);
	}

	if (err) {
		dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
			ice_vf_is_port_vlan_ena(vf) ?
			ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
		return err;
	}

	/* Rx VLAN filtering failure is non-fatal; warn and carry on */
	err = vlan_ops->ena_rx_filtering(vsi);
	if (err)
		dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
			 vf->vf_id, vsi->idx, err);

	return 0;
}
985
986/**
987 * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
988 * @vf: VF to re-apply the configuration for
989 *
990 * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
991 * needs to re-apply the host configured Tx rate limiting configuration.
992 */
993static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
994{
995 struct device *dev = ice_pf_to_dev(vf->pf);
996 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
997 int err;
998
999 if (WARN_ON(!vsi))
1000 return -EINVAL;
1001
1002 if (vf->min_tx_rate) {
1003 err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
1004 if (err) {
1005 dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
1006 vf->min_tx_rate, vf->vf_id, err);
1007 return err;
1008 }
1009 }
1010
1011 if (vf->max_tx_rate) {
1012 err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
1013 if (err) {
1014 dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
1015 vf->max_tx_rate, vf->vf_id, err);
1016 return err;
1017 }
1018 }
1019
1020 return 0;
1021}
1022
1023/**
1024 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
1025 * @vsi: Pointer to VSI
1026 *
1027 * This function moves VSI into corresponding scheduler aggregator node
1028 * based on cached value of "aggregator node info" per VSI
1029 */
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	/* nothing cached -> VSI was never in an aggregator node */
	if (!vsi->agg_node)
		return;

	dev = ice_pf_to_dev(pf);
	if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
		dev_dbg(dev,
			"agg_id %u already has reached max_num_vsis %u\n",
			vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
		return;
	}

	status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	/* failure is only logged at debug level; the VSI simply stays in
	 * the default scheduler node
	 */
	if (status)
		dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
			vsi->idx, vsi->agg_node->agg_id);
	else
		vsi->agg_node->num_vsis++;
}
1055
1056/**
1057 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
1058 * @vf: VF to rebuild host configuration on
1059 */
void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_vf_set_host_trust_cfg(vf);

	/* each sub-config failure is logged but does not abort the rest of
	 * the host configuration rebuild
	 */
	if (ice_vf_rebuild_host_mac_cfg(vf))
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
			vf->vf_id);

	if (ice_vf_rebuild_host_tx_rate_cfg(vf))
		dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
			vf->vf_id);

	if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
		dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
			vf->vf_id);

	/* rebuild aggregator node config for main VF VSI */
	ice_vf_rebuild_aggregator_node_cfg(vsi);
}
1089
1090/**
1091 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
1092 * @vf: VF that control VSI is being invalidated on
1093 */
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	/* ICE_NO_VSI marks "no control VSI" for this VF */
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}
1098
1099/**
1100 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
1101 * @vf: VF that control VSI is being released on
1102 */
1103void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
1104{
1105 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
1106 ice_vf_ctrl_invalidate_vsi(vf);
1107}
1108
1109/**
1110 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
1111 * @vf: VF to setup control VSI for
1112 *
1113 * Returns pointer to the successfully allocated VSI struct on success,
1114 * otherwise returns NULL on failure.
1115 */
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
		/* mark the VF as having no control VSI on failure */
		ice_vf_ctrl_invalidate_vsi(vf);
	}

	return vsi;
}
1130
1131/**
1132 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
1133 * @vf: VF to remove access to VSI for
1134 */
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	/* clear both the software index and the HW VSI number */
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}
1140
1141/**
1142 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1143 * @vf: VF to set in initialized state
1144 *
1145 * After this function the VF will be ready to receive/handle the
1146 * VIRTCHNL_OP_GET_VF_RESOURCES message
1147 */
void ice_vf_set_initialized(struct ice_vf *vf)
{
	/* start with all queues disabled and promisc/disable flags cleared */
	ice_set_vf_state_qs_dis(vf);
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	/* INIT is set last: it gates virtchnl access to this VF */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
}