// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		/* Not all VFs are enabled, so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
			bool link_up)
{
	u16 link_speed;

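	/* report a forced-up link at 100 GB and a forced-down link as unknown */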
	if (link_up)
		link_speed = ICE_AQ_LINK_SPEED_100GB;
	else
		link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;

	ice_set_pfe_link(vf, pfe, link_speed, link_up);
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_link_status *ls;
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;
	ls = &hw->port_info->phy.link_info;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	/* Always report link is down if the VF queues aren't enabled */
	if (!vf->num_qs_ena)
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
	else if (vf->link_forced)
		ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
	else
		ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
				 ICE_AQ_LINK_UP);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable a VF's MSIX vector and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_vf_msix - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * If MSIX entries from the pf->irq_tracker were needed then we need to
 * reset the irq_tracker->end and give back the entries we needed to
 * num_avail_sw_msix.
 *
 * If no MSIX entries were taken from the pf->irq_tracker then just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	if (pf->sriov_base_vector < res->num_entries) {
		res->end = res->num_entries;
		pf->num_avail_sw_msix +=
			res->num_entries - pf->sriov_base_vector;
	}

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
	vf->num_qs_ena = 0;
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

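	/* stop the VF's Tx and Rx rings and mark its queues as disabled */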
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Avoid wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	tmp = pf->num_alloc_vfs;
	pf->num_vf_qps = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings */
			ice_dis_vf_mappings(&pf->vf[i]);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(&pf->pdev->dev,
			"Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(&pf->pdev->dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	struct ice_hw *hw;
	int vf_abs_id, i;

	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in ice_free_vf_res(), but it's safer to do
	 * it earlier, giving any VF configuration functions that may still
	 * be running time to finish.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

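	/* select the VF in PF_PCI_CIAA, then poll PF_PCI_CIAD until the VF
	 * has no pending PCI transactions
	 */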
	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(&pf->pdev->dev,
			"VF %d PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
 * @ctxt: the VSI ctxt to fill
 * @vid: the VLAN ID to set as a PVID
 */
static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
{
	ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
				 ICE_AQ_VSI_PVLAN_INSERT_PVID |
				 ICE_AQ_VSI_VLAN_EMOD_STR);
	ctxt->info.pvid = cpu_to_le16(vid);
	ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
						ICE_AQ_VSI_PROP_SW_VALID);
}

/**
 * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
 * @ctxt: the VSI ctxt to fill
 */
static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
{
	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
						ICE_AQ_VSI_PROP_SW_VALID);
}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @vid: the VLAN ID to set as a PVID
 * @enable: true to enable the PVID, false to disable it
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	if (enable)
		ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
	else
		ice_vsi_kill_pvid_fill_ctxt(ctxt);

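	/* commit the updated VLAN configuration to the VSI context in HW */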
	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
			 status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info = ctxt->info;
out:
	devm_kfree(dev, ctxt);
	return ret;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vf_id: defines VF ID to which this VSI connects.
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
}

/**
 * ice_alloc_vsi_res - Setup VF VSI and its resources
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status = 0;

	/* first vector index is the VFs OICR index */
	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	/* Check if a port VLAN existed before, and restore it accordingly */
	if (vf->port_vlan_id) {
		ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
		ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
	}

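	/* add a broadcast filter, plus the VF's unicast MAC if one is set */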
	eth_broadcast_addr(broadcast);

	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto ice_alloc_vsi_res_exit;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_add_mac_to_list(vsi, &tmp_add_list,
					     vf->dflt_lan_addr.addr);
		if (status)
			goto ice_alloc_vsi_res_exit;
	}

	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status)
		dev_err(&pf->pdev->dev,
			"could not add mac filters error %d\n", status);
	else
		vf->num_mac = 1;

	/* Clear this bit after VF initialization since we shouldn't reclaim
	 * and reassign interrupts for synchronous or asynchronous VFR events.
	 * We don't want to reconfigure interrupts since AVF driver doesn't
	 * expect vector assignment to be changed unless there is a request for
	 * more vectors.
	 */
ice_alloc_vsi_res_exit:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;
}

/**
 * ice_alloc_vf_res - Allocate VF resources
 * @vf: pointer to the VF structure
 */
static int ice_alloc_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int tx_rx_queue_left;
	int status;

	/* Update the number of VF queues, in case the VF had requested
	 * queue changes
	 */
	tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
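	/* honor a queue-change request only if it differs from the current
	 * count and fits within the available queues
	 */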
	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
	    vf->num_req_qs != vf->num_vf_qs)
		vf->num_vf_qs = vf->num_req_qs;

	/* setup VF VSI and necessary resources */
	status = ice_alloc_vsi_res(vf);
	if (status)
		goto ice_alloc_vf_res_exit;

	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* VF is now completely initialized */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);

	return status;

ice_alloc_vf_res_exit:
	ice_free_vf_res(vf);
	return status;
}

/**
 * ice_ena_vf_mappings
 * @vf: pointer to the VF structure
 *
 * Enable VF vectors and queues allocation by writing the details into
 * respective registers.
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	int abs_vf_id, abs_first, abs_last;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int first, last, v;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	first = vf->first_vector_idx;
	last = (first + pf->num_vf_msix) - 1;
	abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
	abs_last = (abs_first + pf->num_vf_msix) - 1;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* VF Vector allocation */
	reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
	       ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
	       VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

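	/* program the same vector range into VPINT_ALLOC_PCI */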
	reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
	       VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
	/* map the interrupts to its functions */
	for (v = first; v <= last; v++) {
		reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt. We put an explicit 0 here to remind us that
	 * VF admin queue interrupts will go to VF MSI-X vector 0.
	 */
	wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
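		/* Tx and Rx queue counts are equal for a VF VSI, so alloc_txq
		 * also supplies the Rx queue count here
		 */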
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns non-zero value if resources (queues/vectors) are available or
 * returns zero if PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and roundup, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

751/**
752 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
753 * @vf: VF to calculate the register index for
754 * @q_vector: a q_vector associated to the VF
755 */
756int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
757{
758 struct ice_pf *pf;
759
760 if (!vf || !q_vector)
761 return -EINVAL;
762
763 pf = vf->pf;
764
765 /* always add one to account for the OICR being the first MSIX */
766 return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
767 q_vector->v_idx + 1;
768}
769
770/**
771 * ice_get_max_valid_res_idx - Get the max valid resource index
772 * @res: pointer to the resource to find the max valid index for
773 *
774 * Start from the end of the ice_res_tracker and return right when we find the
775 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
776 * valid for SR-IOV because it is the only consumer that manipulates the
777 * res->end and this is always called when res->end is set to res->num_entries.
778 */
779static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
780{
781 int i;
782
783 if (!res)
784 return -EINVAL;
785
786 for (i = res->num_entries - 1; i >= 0; i--)
787 if (res->list[i] & ICE_RES_VALID_BIT)
788 return i;
789
790 return 0;
791}
792
793/**
794 * ice_sriov_set_msix_res - Set any used MSIX resources
795 * @pf: pointer to PF structure
796 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
797 *
798 * This function allows SR-IOV resources to be taken from the end of the PF's
799 * allowed HW MSIX vectors so in many cases the irq_tracker will not
800 * be needed. In these cases we just set the pf->sriov_base_vector and return
801 * success.
802 *
803 * If SR-IOV needs to use any pf->irq_tracker entries it updates the
804 * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
805 * so any calls to ice_get_res() using the irq_tracker will not try to use
806 * resources at or beyond the newly set value.
807 *
 * Returns 0 on success, and -EINVAL when there are not enough MSIX vectors
 * available in the PF's space for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 pf_total_msix_vectors =
		pf->hw.func_caps.common_cap.num_msix_vectors;
	struct ice_res_tracker *res = pf->irq_tracker;
	int sriov_base_vector;

	if (max_valid_res_idx < 0)
		return max_valid_res_idx;

	sriov_base_vector = pf_total_msix_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector <= max_valid_res_idx)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	/* dip into irq_tracker entries and update used resources */
	if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
		pf->num_avail_sw_msix -=
			res->num_entries - pf->sriov_base_vector;
		res->end = pf->sriov_base_vector;
	}

	return 0;
}

/**
 * ice_check_avail_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * This function is where we calculate the actual number of resources for
 * VF VSIs; we don't reserve ahead of time during probe. Returns success if
 * vector and queue resources are available, otherwise returns an error code.
 */
static int ice_check_avail_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix, num_txq, num_rxq, num_avail_msix;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* add 1 to max_valid_res_idx to account for it being 0-based */
	num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
		(max_valid_res_idx + 1);

	/* Grab from HW interrupts common pool
	 * Note: By the time the user decides they need more vectors in a VF
	 * it's already too late, since one must decide this prior to creating
	 * the VF interface. So the best we can do is take a guess as to what
	 * the user might want.
	 *
	 * We have two policies for vector allocation:
	 * 1. if num_alloc_vfs is from 1 to 16, we consider this a small
	 * number of NFV VFs used for NFV appliances; since this is a special
	 * case, we try to assign the maximum vectors per VF (65) as much as
	 * possible, based on the determine_resources algorithm.
	 * 2. if num_alloc_vfs is from 17 to 256, it's a large number of
	 * regular VFs which are not used for any special purpose. Hence try
	 * to grab the default interrupt vectors (5 as supported by the AVF
	 * driver).
	 */
	if (pf->num_alloc_vfs <= 16) {
		num_msix = ice_determine_res(pf, num_avail_msix,
					     ICE_MAX_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
		num_msix = ice_determine_res(pf, num_avail_msix,
					     ICE_DFLT_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else {
		dev_err(&pf->pdev->dev,
			"Number of VFs %d exceeds max VF count %d\n",
			pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
		return -EIO;
	}

	if (!num_msix)
		return -EIO;

	/* Grab from the common pool
	 * start by requesting the default number of queues (4, as supported
	 * by the AVF driver).
	 * Note that the main difference between queues and vectors is that
	 * the latter can only be reserved at init time, while queues can be
	 * requested by the VF at runtime through virtchnl; that is the
	 * reason we start by reserving a few queues.
	 */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq)
		return -EIO;

	if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
		return -EINVAL;

	/* since the AVF driver works only with queue pairs, it expects to
	 * have an equal number of Rx and Tx queues, so take the minimum of
	 * the available Tx or Rx queues
	 */
	pf->num_vf_qps = min_t(int, num_txq, num_rxq);
	pf->num_vf_msix = num_msix;

	return 0;
}

/**
 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed. Reallocate VF resources back to make
 * VF state active
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;

	/* PF software completes the flow by notifying VF that reset flow is
	 * completed. This is done by enabling hardware by clearing the reset
	 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
	 * register to VFR completed (done at the end of this function)
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in ice_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
		       bool rm_promisc)
{
	struct ice_pf *pf = vf->pf;
	enum ice_status status = 0;
	struct ice_hw *hw;

	hw = &pf->hw;
	if (vf->num_vlan) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  rm_promisc);
	} else if (vf->port_vlan_id) {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       vf->port_vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     vf->port_vlan_id);
	} else {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
	}

	return status;
}

/**
 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
 * @pf: pointer to the PF structure
 *
 * This function is called as the last part of resetting all VFs, or when
 * configuring VFs for the first time, where there are no resources to be
 * freed.
 * Returns true if resources were properly allocated for all VFs, and false
 * otherwise.
 */
static bool ice_config_res_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int v;

	if (ice_check_avail_res(pf)) {
		dev_err(&pf->pdev->dev,
			"Cannot allocate VF resources, try with a smaller number of VFs\n");
		return false;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	/* Finish resetting each VF and allocate resources */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		struct ice_vf *vf = &pf->vf[v];

		vf->num_vf_qs = pf->num_vf_qps;
		dev_dbg(&pf->pdev->dev,
			"VF-id %d has %d queues configured\n",
			vf->vf_id, vf->num_vf_qs);
		ice_cleanup_and_realloc_vf(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	for (v = 0; v < pf->num_alloc_vfs; v++) {
		struct ice_vsi *vsi;

		vf = &pf->vf[v];
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
			ice_dis_vf_qs(vf);
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(&pf->pdev->dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		vf = &pf->vf[v];

		ice_free_vf_res(vf);

		/* Free VF queues as well, and reallocate later.
		 * If a given VF has different number of queues
		 * configured, the request for update will come
		 * via mailbox communication.
		 */
		vf->num_vf_qs = 0;
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(&pf->pdev->dev,
			"Failed to free MSIX resources used by SR-IOV\n");

	if (!ice_config_res_vfs(pf))
		return false;

	return true;
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is reset, false otherwise.
 */
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	/* If the PF has been disabled, there is no need to reset the VF until
	 * the PF is active again.
	 */
	if (test_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* If the VF has been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue. Otherwise, set
	 * disable VF state bit for actual reset, and continue.
	 */
	if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return false;

	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	hw = &pf->hw;
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}

	/* Display a warning if the VF didn't manage to reset in time, but
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			 vf->vf_id);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_id || vf->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
	}

	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);

	ice_cleanup_and_realloc_vf(vf);

	ice_flush(hw);

	return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_alloc_vfs - Allocate and set up VFs resources
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vfs;
	int i, ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

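	/* enable SR-IOV on the PCI bus before allocating per-VF state */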
	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}
	/* allocate memory */
	vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
			   GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_pci_disable_sriov;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].vf_sw_id = pf->first_sw;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated with initialization */
	if (!ice_config_res_vfs(pf)) {
		ret = -EIO;
		goto err_unroll_sriov;
	}

	return ret;

err_unroll_sriov:
	pf->vf = NULL;
	devm_kfree(&pf->pdev->dev, vfs);
	vfs = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state.
 * Returns false otherwise
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = &pf->pdev->dev;
	int err;

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -ENODEV;
	}

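	/* if the requested VF count differs, free the existing VFs first */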
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return num_vfs;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -ENOTSUPP;
	}

	dev_info(dev, "Allocating %d VFs\n", num_vfs);
	err = ice_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return num_vfs;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * This function is called when the user updates the number of VFs in sysfs.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (ice_is_safe_mode(pf)) {
		dev_err(&pf->pdev->dev,
			"SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs)
		return ice_pci_sriov_ena(pf, num_vfs);

	if (!pci_vfs_assigned(pdev)) {
		ice_free_vfs(pf);
	} else {
		dev_err(&pf->pdev->dev,
			"can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int vf_id;
	u32 reg;

	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !pf->num_alloc_vfs)
		return;

	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		struct ice_vf *vf = &pf->vf[vf_id];
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, true);
	}
}

/**
 * ice_vc_dis_vf - Disable a given VF via SW reset
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset
 */
static void ice_vc_dis_vf(struct ice_vf *vf)
{
	ice_vc_notify_vf_reset(vf);
	ice_reset_vf(vf, false);
}

/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 */
static int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	enum ice_status aq_ret;
	struct ice_pf *pf;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_inval_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			return -EIO;
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_inval_msgs = 0;
	}

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d ret %d aq_err %d\n",
			 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_vc_get_ver_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 */
static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
}

/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_vf_resource);

	vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
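	/* v1.1+ VFs report their capabilities in the message; assume the
	 * legacy L2/RSS-reg/VLAN set for v1.0 VFs
	 */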
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	vfres->num_vsis = 1;
	/* Tx and Rx queue counts are equal for a VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = pf->num_vf_msix;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;

	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->dflt_lan_addr.addr);

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	devm_kfree(&pf->pdev->dev, vfres);
	return ret;
}

/**
 * ice_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself; unlike other virtchnl messages,
 * the PF driver doesn't send a response back to the VF
 */
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
	if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		ice_reset_vf(vf, false);
}

/**
 * ice_find_vsi_from_id
 * @pf: the PF structure to search for the VSI
 * @id: ID of the VSI it is searching for
 *
 * searches for the VSI with the given ID
 */
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
			return pf->vsi[i];

	return NULL;
}

/**
 * ice_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI ID
 *
 * check for the valid VSI ID
 */
static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * ice_vc_isvalid_q_id
 * @vf: pointer to the VF info
 * @vsi_id: VSI ID
 * @qid: VSI relative queue ID
 *
 * check for the valid queue ID
 */
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
	/* allocated Tx and Rx queues should always be equal for a VF VSI */
1747 return (vsi && (qid < vsi->alloc_txq));
1748}
1749
1750/**
1751 * ice_vc_isvalid_ring_len
1752 * @ring_len: length of ring
1753 *
1754 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
1755 * or zero
1756 */
1757static bool ice_vc_isvalid_ring_len(u16 ring_len)
1758{
1759 return ring_len == 0 ||
1760 (ring_len >= ICE_MIN_NUM_DESC &&
1761 ring_len <= ICE_MAX_NUM_DESC &&
1762 !(ring_len % ICE_REQ_DESC_MULTIPLE));
1763}
1764
1765/**
1766 * ice_vc_config_rss_key
1767 * @vf: pointer to the VF info
1768 * @msg: pointer to the msg buffer
1769 *
1770 * Configure the VF's RSS key
1771 */
1772static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1773{
1774 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1775 struct virtchnl_rss_key *vrk =
1776 (struct virtchnl_rss_key *)msg;
1777 struct ice_pf *pf = vf->pf;
1778 struct ice_vsi *vsi = NULL;
1779
1780 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1781 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1782 goto error_param;
1783 }
1784
1785 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1786 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1787 goto error_param;
1788 }
1789
1790 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1791 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1792 goto error_param;
1793 }
1794
1795 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1796 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1797 goto error_param;
1798 }
1799
1800 vsi = pf->vsi[vf->lan_vsi_idx];
1801 if (!vsi) {
1802 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1803 goto error_param;
1804 }
1805
1806 if (ice_set_rss(vsi, vrk->key, NULL, 0))
1807 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1808error_param:
1809 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1810 NULL, 0);
1811}
1812
1813/**
1814 * ice_vc_config_rss_lut
1815 * @vf: pointer to the VF info
1816 * @msg: pointer to the msg buffer
1817 *
1818 * Configure the VF's RSS LUT
1819 */
1820static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1821{
1822 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1823 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1824 struct ice_pf *pf = vf->pf;
1825 struct ice_vsi *vsi = NULL;
1826
1827 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1828 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1829 goto error_param;
1830 }
1831
1832 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1833 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1834 goto error_param;
1835 }
1836
1837 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
1838 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1839 goto error_param;
1840 }
1841
1842 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1843 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1844 goto error_param;
1845 }
1846
1847 vsi = pf->vsi[vf->lan_vsi_idx];
1848 if (!vsi) {
1849 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1850 goto error_param;
1851 }
1852
1853 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
1854 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1855error_param:
1856 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1857 NULL, 0);
1858}
1859
1860/**
 * ice_vc_get_stats_msg - get VF VSI stats
1862 * @vf: pointer to the VF info
1863 * @msg: pointer to the msg buffer
1864 *
1865 * called from the VF to get VSI stats
1866 */
1867static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1868{
1869 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1870 struct virtchnl_queue_select *vqs =
1871 (struct virtchnl_queue_select *)msg;
1872 struct ice_pf *pf = vf->pf;
	struct ice_eth_stats stats = { 0 };
1874 struct ice_vsi *vsi;
1875
1876 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1877 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1878 goto error_param;
1879 }
1880
1881 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1882 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1883 goto error_param;
1884 }
1885
1886 vsi = pf->vsi[vf->lan_vsi_idx];
1887 if (!vsi) {
1888 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1889 goto error_param;
1890 }
1891
	/* stats is zero-initialized at declaration, so the error paths
	 * above never leak uninitialized stack memory to the VF
	 */
	ice_update_eth_stats(vsi);
1894
1895 stats = vsi->eth_stats;
1896
1897error_param:
1898 /* send the response to the VF */
1899 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1900 (u8 *)&stats, sizeof(stats));
1901}
1902
1903/**
 * ice_vc_ena_qs_msg - enable all or specific queue(s) for the VF
1905 * @vf: pointer to the VF info
1906 * @msg: pointer to the msg buffer
1907 *
1908 * called from the VF to enable all or specific queue(s)
1909 */
1910static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1911{
1912 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1913 struct virtchnl_queue_select *vqs =
1914 (struct virtchnl_queue_select *)msg;
1915 struct ice_pf *pf = vf->pf;
1916 struct ice_vsi *vsi;
1917 unsigned long q_map;
1918 u16 vf_q_id;
1919
1920 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1921 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1922 goto error_param;
1923 }
1924
1925 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1926 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1927 goto error_param;
1928 }
1929
1930 if (!vqs->rx_queues && !vqs->tx_queues) {
1931 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1932 goto error_param;
1933 }
1934
1935 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
1936 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
1937 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1938 goto error_param;
1939 }
1940
1941 vsi = pf->vsi[vf->lan_vsi_idx];
1942 if (!vsi) {
1943 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1944 goto error_param;
1945 }
1946
1947 /* Enable only Rx rings, Tx rings were enabled by the FW when the
1948 * Tx queue group list was configured and the context bits were
1949 * programmed using ice_vsi_cfg_txqs
1950 */
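	/* vqs->rx_queues is a bitmap of VF-relative queue IDs; for
	 * example, a value of 0x5 selects queues 0 and 2.
	 */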
1951 q_map = vqs->rx_queues;
1952 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
1953 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1954 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1955 goto error_param;
1956 }
1957
1958 /* Skip queue if enabled */
1959 if (test_bit(vf_q_id, vf->rxq_ena))
1960 continue;
1961
1962 if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
1963 dev_err(&vsi->back->pdev->dev,
1964 "Failed to enable Rx ring %d on VSI %d\n",
1965 vf_q_id, vsi->vsi_num);
1966 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1967 goto error_param;
1968 }
1969
1970 set_bit(vf_q_id, vf->rxq_ena);
1971 vf->num_qs_ena++;
1972 }
1973
	q_map = vqs->tx_queues;
1976 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
1977 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1978 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1979 goto error_param;
1980 }
1981
1982 /* Skip queue if enabled */
1983 if (test_bit(vf_q_id, vf->txq_ena))
1984 continue;
1985
1986 set_bit(vf_q_id, vf->txq_ena);
1987 vf->num_qs_ena++;
1988 }
1989
1990 /* Set flag to indicate that queues are enabled */
1991 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1992 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1993
1994error_param:
1995 /* send the response to the VF */
1996 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1997 NULL, 0);
1998}
1999
2000/**
 * ice_vc_dis_qs_msg - disable all or specific queue(s) for the VF
2002 * @vf: pointer to the VF info
2003 * @msg: pointer to the msg buffer
2004 *
 * called from the VF to disable all or specific queue(s)
2007 */
2008static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2009{
2010 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2011 struct virtchnl_queue_select *vqs =
2012 (struct virtchnl_queue_select *)msg;
2013 struct ice_pf *pf = vf->pf;
2014 struct ice_vsi *vsi;
2015 unsigned long q_map;
2016 u16 vf_q_id;
2017
2018 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2019 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2020 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2021 goto error_param;
2022 }
2023
2024 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2025 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2026 goto error_param;
2027 }
2028
2029 if (!vqs->rx_queues && !vqs->tx_queues) {
2030 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2031 goto error_param;
2032 }
2033
2034 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
2035 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
2036 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2037 goto error_param;
2038 }
2039
2040 vsi = pf->vsi[vf->lan_vsi_idx];
2041 if (!vsi) {
2042 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2043 goto error_param;
2044 }
2045
2046 if (vqs->tx_queues) {
2047 q_map = vqs->tx_queues;
2048
2049 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2050 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2051 struct ice_txq_meta txq_meta = { 0 };
2052
2053 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2054 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2055 goto error_param;
2056 }
2057
2058 /* Skip queue if not enabled */
2059 if (!test_bit(vf_q_id, vf->txq_ena))
2060 continue;
2061
2062 ice_fill_txq_meta(vsi, ring, &txq_meta);
2063
2064 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2065 ring, &txq_meta)) {
2066 dev_err(&vsi->back->pdev->dev,
2067 "Failed to stop Tx ring %d on VSI %d\n",
2068 vf_q_id, vsi->vsi_num);
2069 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2070 goto error_param;
2071 }
2072
2073 /* Clear enabled queues flag */
2074 clear_bit(vf_q_id, vf->txq_ena);
2075 vf->num_qs_ena--;
2076 }
2077 }
2078
2079 if (vqs->rx_queues) {
2080 q_map = vqs->rx_queues;
2081
2082 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2083 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2084 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2085 goto error_param;
2086 }
2087
2088 /* Skip queue if not enabled */
2089 if (!test_bit(vf_q_id, vf->rxq_ena))
2090 continue;
2091
2092 if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
2093 dev_err(&vsi->back->pdev->dev,
2094 "Failed to stop Rx ring %d on VSI %d\n",
2095 vf_q_id, vsi->vsi_num);
2096 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2097 goto error_param;
2098 }
2099
2100 /* Clear enabled queues flag */
2101 clear_bit(vf_q_id, vf->rxq_ena);
2102 vf->num_qs_ena--;
2103 }
2104 }
2105
	/* Clear the queues-enabled VF state once no queues remain enabled */
2107 if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
2108 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2109
2110error_param:
2111 /* send the response to the VF */
2112 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2113 NULL, 0);
2114}
2115
2116/**
 * ice_vc_cfg_irq_map_msg - configure the VF's IRQ-to-queue map
2118 * @vf: pointer to the VF info
2119 * @msg: pointer to the msg buffer
2120 *
2121 * called from the VF to configure the IRQ to queue map
2122 */
2123static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2124{
2125 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2126 struct virtchnl_irq_map_info *irqmap_info;
2127 u16 vsi_id, vsi_q_id, vector_id;
2128 struct virtchnl_vector_map *map;
2129 struct ice_pf *pf = vf->pf;
2130 u16 num_q_vectors_mapped;
2131 struct ice_vsi *vsi;
2132 unsigned long qmap;
2133 int i;
2134
2135 irqmap_info = (struct virtchnl_irq_map_info *)msg;
2136 num_q_vectors_mapped = irqmap_info->num_vectors;
2137
2138 /* Check to make sure number of VF vectors mapped is not greater than
2139 * number of VF vectors originally allocated, and check that
2140 * there is actually at least a single VF queue vector mapped
2141 */
2142 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2143 pf->num_vf_msix < num_q_vectors_mapped ||
2144 !irqmap_info->num_vectors) {
2145 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2146 goto error_param;
2147 }
2148
2149 vsi = pf->vsi[vf->lan_vsi_idx];
2150 if (!vsi) {
2151 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2152 goto error_param;
2153 }
2154
2155 for (i = 0; i < num_q_vectors_mapped; i++) {
2156 struct ice_q_vector *q_vector;
2157
2158 map = &irqmap_info->vecmap[i];
2159
2160 vector_id = map->vector_id;
2161 vsi_id = map->vsi_id;
2162 /* validate msg params */
		if (vector_id >= pf->hw.func_caps.common_cap.num_msix_vectors ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
2166 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2167 goto error_param;
2168 }
2169
		/* No need to map the VF miscellaneous (non-queue) vector */
2171 if (!vector_id)
2172 continue;
2173
2174 /* Subtract non queue vector from vector_id passed by VF
2175 * to get actual number of VSI queue vector array index
2176 */
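		/* e.g. with a single non-queue (mailbox/OICR) vector,
		 * VF vector_id 1 lands on vsi->q_vectors[0]
		 */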
2177 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2178 if (!q_vector) {
2179 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2180 goto error_param;
2181 }
2182
		/* look out for invalid queue indexes */
2184 qmap = map->rxq_map;
2185 q_vector->num_ring_rx = 0;
2186 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2187 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2188 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2189 goto error_param;
2190 }
2191 q_vector->num_ring_rx++;
2192 q_vector->rx.itr_idx = map->rxitr_idx;
2193 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2194 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2195 q_vector->rx.itr_idx);
2196 }
2197
2198 qmap = map->txq_map;
2199 q_vector->num_ring_tx = 0;
2200 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2201 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2202 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2203 goto error_param;
2204 }
2205 q_vector->num_ring_tx++;
2206 q_vector->tx.itr_idx = map->txitr_idx;
2207 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2208 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2209 q_vector->tx.itr_idx);
2210 }
2211 }
2212
2213error_param:
2214 /* send the response to the VF */
2215 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2216 NULL, 0);
2217}
2218
2219/**
 * ice_vc_cfg_qs_msg - configure the VF's Rx/Tx queues
2221 * @vf: pointer to the VF info
2222 * @msg: pointer to the msg buffer
2223 *
2224 * called from the VF to configure the Rx/Tx queues
2225 */
2226static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2227{
2228 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2229 struct virtchnl_vsi_queue_config_info *qci =
2230 (struct virtchnl_vsi_queue_config_info *)msg;
2231 struct virtchnl_queue_pair_info *qpi;
2232 u16 num_rxq = 0, num_txq = 0;
2233 struct ice_pf *pf = vf->pf;
2234 struct ice_vsi *vsi;
2235 int i;
2236
2237 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2238 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2239 goto error_param;
2240 }
2241
2242 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2243 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2244 goto error_param;
2245 }
2246
2247 vsi = pf->vsi[vf->lan_vsi_idx];
2248 if (!vsi) {
2249 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2250 goto error_param;
2251 }
2252
2253 if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
2254 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
		dev_err(&pf->pdev->dev,
			"VF-%d requesting %u queue pairs, but no more than %d are supported\n",
			vf->vf_id, qci->num_queue_pairs,
			min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2258 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2259 goto error_param;
2260 }
2261
2262 for (i = 0; i < qci->num_queue_pairs; i++) {
2263 qpi = &qci->qpair[i];
2264 if (qpi->txq.vsi_id != qci->vsi_id ||
2265 qpi->rxq.vsi_id != qci->vsi_id ||
2266 qpi->rxq.queue_id != qpi->txq.queue_id ||
2267 qpi->txq.headwb_enabled ||
2268 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2269 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2270 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2271 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2272 goto error_param;
2273 }
2274 /* copy Tx queue info from VF into VSI */
2275 if (qpi->txq.ring_len > 0) {
2276 num_txq++;
2277 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2278 vsi->tx_rings[i]->count = qpi->txq.ring_len;
2279 }
2280
2281 /* copy Rx queue info from VF into VSI */
2282 if (qpi->rxq.ring_len > 0) {
2283 num_rxq++;
2284 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2285 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2286
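			/* a databuffer_size of 0 bypasses the range check;
			 * otherwise it must fit the hardware Rx buffer
			 * limits of [1024, 16K - 128] bytes
			 */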
2287 if (qpi->rxq.databuffer_size != 0 &&
2288 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2289 qpi->rxq.databuffer_size < 1024)) {
2290 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2291 goto error_param;
2292 }
2293 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2294 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2295 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2296 qpi->rxq.max_pkt_size < 64) {
2297 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2298 goto error_param;
2299 }
2300 }
2301
2302 vsi->max_frame = qpi->rxq.max_pkt_size;
2303 }
2304
	/* The VF may configure fewer queue pairs than it was allocated by
	 * default, so update the VSI with the number actually configured
	 */
2308 vsi->num_txq = num_txq;
2309 vsi->num_rxq = num_rxq;
2310 /* All queues of VF VSI are in TC 0 */
2311 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2312 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
2313
2314 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2315 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2316
2317error_param:
2318 /* send the response to the VF */
2319 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2320 NULL, 0);
2321}
2322
2323/**
 * ice_is_vf_trusted - check if the VF is trusted
2325 * @vf: pointer to the VF info
2326 */
2327static bool ice_is_vf_trusted(struct ice_vf *vf)
2328{
2329 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2330}
2331
2332/**
 * ice_can_vf_change_mac - check if the VF may change its MAC filters
2334 * @vf: pointer to the VF info
2335 *
2336 * Return true if the VF is allowed to change its MAC filters, false otherwise
2337 */
2338static bool ice_can_vf_change_mac(struct ice_vf *vf)
2339{
2340 /* If the VF MAC address has been set administratively (via the
2341 * ndo_set_vf_mac command), then deny permission to the VF to
2342 * add/delete unicast MAC addresses, unless the VF is trusted
2343 */
2344 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2345 return false;
2346
2347 return true;
2348}
2349
2350/**
 * ice_vc_handle_mac_addr_msg - add or remove VF MAC address filters
2352 * @vf: pointer to the VF info
2353 * @msg: pointer to the msg buffer
2354 * @set: true if MAC filters are being set, false otherwise
2355 *
 * add or remove guest MAC address filters, depending on @set
2357 */
2358static int
2359ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
2360{
2361 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2362 struct virtchnl_ether_addr_list *al =
2363 (struct virtchnl_ether_addr_list *)msg;
2364 struct ice_pf *pf = vf->pf;
2365 enum virtchnl_ops vc_op;
2366 enum ice_status status;
2367 struct ice_vsi *vsi;
2368 int mac_count = 0;
2369 int i;
2370
2371 if (set)
2372 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
2373 else
2374 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
2375
2376 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2377 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2378 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2379 goto handle_mac_exit;
2380 }
2381
2382 if (set && !ice_is_vf_trusted(vf) &&
2383 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(&pf->pdev->dev,
			"Can't add more MAC addresses: VF-%d is not trusted, switch the VF to trusted mode to raise the MAC filter limit\n",
			vf->vf_id);
		/* The VF is over its MAC filter budget and is not trusted,
		 * so reject the request outright
		 */
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2391 goto handle_mac_exit;
2392 }
2393
2394 vsi = pf->vsi[vf->lan_vsi_idx];
2395 if (!vsi) {
2396 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2397 goto handle_mac_exit;
2398 }
2399
2400 for (i = 0; i < al->num_elements; i++) {
2401 u8 *maddr = al->list[i].addr;
2402
2403 if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
2404 is_broadcast_ether_addr(maddr)) {
2405 if (set) {
2406 /* VF is trying to add filters that the PF
2407 * already added. Just continue.
2408 */
2409 dev_info(&pf->pdev->dev,
2410 "MAC %pM already set for VF %d\n",
2411 maddr, vf->vf_id);
2412 continue;
2413 } else {
2414 /* VF can't remove dflt_lan_addr/bcast MAC */
2415 dev_err(&pf->pdev->dev,
2416 "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
2417 maddr, vf->vf_id);
2418 continue;
2419 }
2420 }
2421
2422 /* check for the invalid cases and bail if necessary */
2423 if (is_zero_ether_addr(maddr)) {
2424 dev_err(&pf->pdev->dev,
2425 "invalid MAC %pM provided for VF %d\n",
2426 maddr, vf->vf_id);
2427 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2428 goto handle_mac_exit;
2429 }
2430
2431 if (is_unicast_ether_addr(maddr) &&
2432 !ice_can_vf_change_mac(vf)) {
2433 dev_err(&pf->pdev->dev,
2434 "can't change unicast MAC for untrusted VF %d\n",
2435 vf->vf_id);
2436 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2437 goto handle_mac_exit;
2438 }
2439
2440 /* program the updated filter list */
2441 status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
2442 if (status == ICE_ERR_DOES_NOT_EXIST ||
2443 status == ICE_ERR_ALREADY_EXISTS) {
2444 dev_info(&pf->pdev->dev,
				 "can't %s MAC filter %pM for VF %d, error %d\n",
2446 set ? "add" : "remove", maddr, vf->vf_id,
2447 status);
2448 } else if (status) {
2449 dev_err(&pf->pdev->dev,
2450 "can't %s MAC filters for VF %d, error %d\n",
2451 set ? "add" : "remove", vf->vf_id, status);
2452 v_ret = ice_err_to_virt_err(status);
2453 goto handle_mac_exit;
2454 }
2455
2456 mac_count++;
2457 }
2458
2459 /* Track number of MAC filters programmed for the VF VSI */
2460 if (set)
2461 vf->num_mac += mac_count;
2462 else
2463 vf->num_mac -= mac_count;
2464
2465handle_mac_exit:
2466 /* send the response to the VF */
2467 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2468}
2469
2470/**
 * ice_vc_add_mac_addr_msg - add guest MAC address filter
2472 * @vf: pointer to the VF info
2473 * @msg: pointer to the msg buffer
2474 *
2475 * add guest MAC address filter
2476 */
2477static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2478{
2479 return ice_vc_handle_mac_addr_msg(vf, msg, true);
2480}
2481
2482/**
 * ice_vc_del_mac_addr_msg - remove guest MAC address filter
2484 * @vf: pointer to the VF info
2485 * @msg: pointer to the msg buffer
2486 *
2487 * remove guest MAC address filter
2488 */
2489static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2490{
2491 return ice_vc_handle_mac_addr_msg(vf, msg, false);
2492}
2493
2494/**
 * ice_vc_request_qs_msg - request a different number of queue pairs
2496 * @vf: pointer to the VF info
2497 * @msg: pointer to the msg buffer
2498 *
2499 * VFs get a default number of queues but can use this message to request a
2500 * different number. If the request is successful, PF will reset the VF and
2501 * return 0. If unsuccessful, PF will send message informing VF of number of
2502 * available queue pairs via virtchnl message response to VF.
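 *
 * For example, a VF that owns 4 queue pairs may ask for 16: if enough
 * Tx/Rx queues remain in the PF pool, the VF is reset and comes back
 * with 16 pairs; otherwise the reply advertises how many pairs the PF
 * can actually offer.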
2503 */
2504static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2505{
2506 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2507 struct virtchnl_vf_res_request *vfres =
2508 (struct virtchnl_vf_res_request *)msg;
2509 u16 req_queues = vfres->num_queue_pairs;
2510 struct ice_pf *pf = vf->pf;
2511 u16 max_allowed_vf_queues;
2512 u16 tx_rx_queue_left;
2513 u16 cur_queues;
2514
2515 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2516 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2517 goto error_param;
2518 }
2519
2520 cur_queues = vf->num_vf_qs;
2521 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2522 ice_get_avail_rxq_count(pf));
2523 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
2524 if (!req_queues) {
2525 dev_err(&pf->pdev->dev,
2526 "VF %d tried to request 0 queues. Ignoring.\n",
2527 vf->vf_id);
2528 } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
2529 dev_err(&pf->pdev->dev,
2530 "VF %d tried to request more than %d queues.\n",
2531 vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
2532 vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
2533 } else if (req_queues > cur_queues &&
2534 req_queues - cur_queues > tx_rx_queue_left) {
2535 dev_warn(&pf->pdev->dev,
2536 "VF %d requested %u more queues, but only %u left.\n",
2537 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2538 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
2539 ICE_MAX_BASE_QS_PER_VF);
2540 } else {
2541 /* request is successful, then reset VF */
2542 vf->num_req_qs = req_queues;
2543 ice_vc_dis_vf(vf);
2544 dev_info(&pf->pdev->dev,
2545 "VF %d granted request of %u queues.\n",
2546 vf->vf_id, req_queues);
2547 return 0;
2548 }
2549
2550error_param:
2551 /* send the response to the VF */
2552 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2553 v_ret, (u8 *)vfres, sizeof(*vfres));
2554}
2555
2556/**
 * ice_set_vf_port_vlan - program VF port VLAN ID and/or QoS
2558 * @netdev: network interface device structure
2559 * @vf_id: VF identifier
2560 * @vlan_id: VLAN ID being set
2561 * @qos: priority setting
2562 * @vlan_proto: VLAN protocol
2563 *
2564 * program VF Port VLAN ID and/or QoS
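 *
 * Reached via the ndo_set_vf_vlan callback, e.g.
 * "ip link set <pf> vf <id> vlan <vid> qos <prio>"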
2565 */
2566int
2567ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
2568 __be16 vlan_proto)
2569{
2570 u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
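	/* vlanprio mirrors the 802.1Q TCI layout: the VLAN ID sits in the
	 * low 12 bits with the QoS/PCP value shifted above it, so e.g.
	 * vlan_id 100 with qos 3 yields (3 << ICE_VLAN_PRIORITY_S) | 100
	 */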
2571 struct ice_netdev_priv *np = netdev_priv(netdev);
2572 struct ice_pf *pf = np->vsi->back;
2573 struct ice_vsi *vsi;
2574 struct ice_vf *vf;
2575 int ret = 0;
2576
2577 /* validate the request */
2578 if (vf_id >= pf->num_alloc_vfs) {
2579 dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
2580 return -EINVAL;
2581 }
2582
2583 if (vlan_id > ICE_MAX_VLANID || qos > 7) {
2584 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2585 return -EINVAL;
2586 }
2587
2588 if (vlan_proto != htons(ETH_P_8021Q)) {
2589 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
2590 return -EPROTONOSUPPORT;
2591 }
2592
2593 vf = &pf->vf[vf_id];
2594 vsi = pf->vsi[vf->lan_vsi_idx];
2595 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2596 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
2597 return -EBUSY;
2598 }
2599
2600 if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
2601 /* duplicate request, so just return success */
2602 dev_info(&pf->pdev->dev,
2603 "Duplicate pvid %d request\n", vlanprio);
2604 return ret;
2605 }
2606
2607 /* If PVID, then remove all filters on the old VLAN */
2608 if (vsi->info.pvid)
2609 ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2610 VLAN_VID_MASK));
2611
2612 if (vlan_id || qos) {
2613 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
2614 if (ret)
2615 goto error_set_pvid;
2616 } else {
2617 ice_vsi_manage_pvid(vsi, 0, false);
2618 vsi->info.pvid = 0;
2619 }
2620
2621 if (vlan_id) {
2622 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2623 vlan_id, qos, vf_id);
2624
2625 /* add new VLAN filter for each MAC */
2626 ret = ice_vsi_add_vlan(vsi, vlan_id);
2627 if (ret)
2628 goto error_set_pvid;
2629 }
2630
2631 /* The Port VLAN needs to be saved across resets the same as the
2632 * default LAN MAC address.
2633 */
2634 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2635
2636error_set_pvid:
2637 return ret;
2638}
2639
2640/**
 * ice_vc_process_vlan_msg - add or remove guest VLAN IDs
2642 * @vf: pointer to the VF info
2643 * @msg: pointer to the msg buffer
2644 * @add_v: Add VLAN if true, otherwise delete VLAN
2645 *
2646 * Process virtchnl op to add or remove programmed guest VLAN ID
2647 */
2648static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2649{
2650 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2651 struct virtchnl_vlan_filter_list *vfl =
2652 (struct virtchnl_vlan_filter_list *)msg;
2653 struct ice_pf *pf = vf->pf;
2654 bool vlan_promisc = false;
2655 struct ice_vsi *vsi;
2656 struct ice_hw *hw;
2657 int status = 0;
2658 u8 promisc_m;
2659 int i;
2660
2661 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2662 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2663 goto error_param;
2664 }
2665
2666 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2667 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2668 goto error_param;
2669 }
2670
2671 if (add_v && !ice_is_vf_trusted(vf) &&
2672 vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2673 dev_info(&pf->pdev->dev,
			 "VF-%d is not trusted, switch the VF to trusted mode to add more VLAN IDs\n",
2675 vf->vf_id);
2676 /* There is no need to let VF know about being not trusted,
2677 * so we can just return success message here
2678 */
2679 goto error_param;
2680 }
2681
2682 for (i = 0; i < vfl->num_elements; i++) {
2683 if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
2684 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2685 dev_err(&pf->pdev->dev,
2686 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2687 goto error_param;
2688 }
2689 }
2690
2691 hw = &pf->hw;
2692 vsi = pf->vsi[vf->lan_vsi_idx];
2693 if (!vsi) {
2694 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2695 goto error_param;
2696 }
2697
2698 if (vsi->info.pvid) {
2699 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2700 goto error_param;
2701 }
2702
2703 if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
2704 dev_err(&pf->pdev->dev,
2705 "%sable VLAN stripping failed for VSI %i\n",
2706 add_v ? "en" : "dis", vsi->vsi_num);
2707 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2708 goto error_param;
2709 }
2710
2711 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2712 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2713 vlan_promisc = true;
2714
2715 if (add_v) {
2716 for (i = 0; i < vfl->num_elements; i++) {
2717 u16 vid = vfl->vlan_id[i];
2718
2719 if (!ice_is_vf_trusted(vf) &&
2720 vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2721 dev_info(&pf->pdev->dev,
					 "VF-%d is not trusted, switch the VF to trusted mode to add more VLAN IDs\n",
2723 vf->vf_id);
2724 /* There is no need to let VF know about being
2725 * not trusted, so we can just return success
2726 * message here as well.
2727 */
2728 goto error_param;
2729 }
2730
2731 if (ice_vsi_add_vlan(vsi, vid)) {
2732 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2733 goto error_param;
2734 }
2735
2736 vf->num_vlan++;
2737 /* Enable VLAN pruning when VLAN is added */
2738 if (!vlan_promisc) {
2739 status = ice_cfg_vlan_pruning(vsi, true, false);
2740 if (status) {
2741 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2742 dev_err(&pf->pdev->dev,
						"Enable VLAN pruning on VLAN ID %d failed, error %d\n",
2744 vid, status);
2745 goto error_param;
2746 }
2747 } else {
2748 /* Enable Ucast/Mcast VLAN promiscuous mode */
2749 promisc_m = ICE_PROMISC_VLAN_TX |
2750 ICE_PROMISC_VLAN_RX;
2751
2752 status = ice_set_vsi_promisc(hw, vsi->idx,
2753 promisc_m, vid);
2754 if (status) {
2755 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2756 dev_err(&pf->pdev->dev,
						"Enable Unicast/Multicast promiscuous mode on VLAN ID %d failed, error %d\n",
2758 vid, status);
2759 }
2760 }
2761 }
2762 } else {
		/* For an untrusted VF, the number of VLAN elements passed
		 * to the PF for removal may exceed the number of VLAN
		 * filters actually programmed for that VF. Cap the loop at
		 * the number of VLANs added earlier with the add VLAN
		 * opcode so we never try to remove a VLAN that doesn't
		 * exist, which would send an erroneous failure message back
		 * to the VF.
		 */
2770 int num_vf_vlan;
2771
2772 num_vf_vlan = vf->num_vlan;
2773 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2774 u16 vid = vfl->vlan_id[i];
2775
2776 /* Make sure ice_vsi_kill_vlan is successful before
2777 * updating VLAN information
2778 */
2779 if (ice_vsi_kill_vlan(vsi, vid)) {
2780 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2781 goto error_param;
2782 }
2783
2784 vf->num_vlan--;
2785 /* Disable VLAN pruning when the last VLAN is removed */
2786 if (!vf->num_vlan)
2787 ice_cfg_vlan_pruning(vsi, false, false);
2788
2789 /* Disable Unicast/Multicast VLAN promiscuous mode */
2790 if (vlan_promisc) {
2791 promisc_m = ICE_PROMISC_VLAN_TX |
2792 ICE_PROMISC_VLAN_RX;
2793
2794 ice_clear_vsi_promisc(hw, vsi->idx,
2795 promisc_m, vid);
2796 }
2797 }
2798 }
2799
2800error_param:
2801 /* send the response to the VF */
2802 if (add_v)
2803 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2804 NULL, 0);
2805 else
2806 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2807 NULL, 0);
2808}
2809
2810/**
 * ice_vc_add_vlan_msg - add and program guest VLAN ID
2812 * @vf: pointer to the VF info
2813 * @msg: pointer to the msg buffer
2814 *
2815 * Add and program guest VLAN ID
2816 */
2817static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2818{
2819 return ice_vc_process_vlan_msg(vf, msg, true);
2820}
2821
2822/**
 * ice_vc_remove_vlan_msg - remove programmed guest VLAN ID
2824 * @vf: pointer to the VF info
2825 * @msg: pointer to the msg buffer
2826 *
2827 * remove programmed guest VLAN ID
2828 */
2829static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2830{
2831 return ice_vc_process_vlan_msg(vf, msg, false);
2832}
2833
2834/**
 * ice_vc_ena_vlan_stripping - enable VLAN header stripping for the VF
2836 * @vf: pointer to the VF info
2837 *
2838 * Enable VLAN header stripping for a given VF
2839 */
2840static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2841{
2842 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2843 struct ice_pf *pf = vf->pf;
2844 struct ice_vsi *vsi;
2845
2846 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2847 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2848 goto error_param;
2849 }
2850
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, true))
2853 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2854
2855error_param:
2856 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2857 v_ret, NULL, 0);
2858}
2859
2860/**
 * ice_vc_dis_vlan_stripping - disable VLAN header stripping for the VF
2862 * @vf: pointer to the VF info
2863 *
2864 * Disable VLAN header stripping for a given VF
2865 */
2866static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2867{
2868 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2869 struct ice_pf *pf = vf->pf;
2870 struct ice_vsi *vsi;
2871
2872 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2873 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2874 goto error_param;
2875 }
2876
2877 vsi = pf->vsi[vf->lan_vsi_idx];
2878 if (!vsi) {
2879 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2880 goto error_param;
2881 }
2882
2883 if (ice_vsi_manage_vlan_stripping(vsi, false))
2884 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2885
2886error_param:
2887 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2888 v_ret, NULL, 0);
2889}
2890
2891/**
2892 * ice_vc_process_vf_msg - Process request from VF
2893 * @pf: pointer to the PF structure
2894 * @event: pointer to the AQ event
2895 *
2896 * called from the common asq/arq handler to
2897 * process request from VF
2898 */
2899void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
2900{
2901 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	u16 vf_id = le16_to_cpu(event->desc.retval);
2903 u16 msglen = event->msg_len;
2904 u8 *msg = event->msg_buf;
2905 struct ice_vf *vf = NULL;
2906 int err = 0;
2907
	if (vf_id >= pf->num_alloc_vfs) {
		/* bail out before error_handler: vf is still NULL here and
		 * must not be passed to ice_vc_send_msg_to_vf
		 */
		dev_err(&pf->pdev->dev,
			"Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
			vf_id, v_opcode, msglen);
		return;
	}
2912
2913 vf = &pf->vf[vf_id];
2914
2915 /* Check if VF is disabled. */
2916 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
2917 err = -EPERM;
2918 goto error_handler;
2919 }
2920
2921 /* Perform basic checks on the msg */
2922 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2923 if (err) {
2924 if (err == VIRTCHNL_STATUS_ERR_PARAM)
2925 err = -EPERM;
2926 else
2927 err = -EINVAL;
2928 }
2929
2930error_handler:
2931 if (err) {
2932 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
2933 NULL, 0);
2934 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
2935 vf_id, v_opcode, msglen, err);
2936 return;
2937 }
2938
2939 switch (v_opcode) {
2940 case VIRTCHNL_OP_VERSION:
2941 err = ice_vc_get_ver_msg(vf, msg);
2942 break;
2943 case VIRTCHNL_OP_GET_VF_RESOURCES:
2944 err = ice_vc_get_vf_res_msg(vf, msg);
2945 ice_vc_notify_vf_link_state(vf);
2946 break;
2947 case VIRTCHNL_OP_RESET_VF:
2948 ice_vc_reset_vf_msg(vf);
2949 break;
2950 case VIRTCHNL_OP_ADD_ETH_ADDR:
2951 err = ice_vc_add_mac_addr_msg(vf, msg);
2952 break;
2953 case VIRTCHNL_OP_DEL_ETH_ADDR:
2954 err = ice_vc_del_mac_addr_msg(vf, msg);
2955 break;
2956 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2957 err = ice_vc_cfg_qs_msg(vf, msg);
2958 break;
2959 case VIRTCHNL_OP_ENABLE_QUEUES:
2960 err = ice_vc_ena_qs_msg(vf, msg);
2961 ice_vc_notify_vf_link_state(vf);
2962 break;
2963 case VIRTCHNL_OP_DISABLE_QUEUES:
2964 err = ice_vc_dis_qs_msg(vf, msg);
2965 break;
2966 case VIRTCHNL_OP_REQUEST_QUEUES:
2967 err = ice_vc_request_qs_msg(vf, msg);
2968 break;
2969 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2970 err = ice_vc_cfg_irq_map_msg(vf, msg);
2971 break;
2972 case VIRTCHNL_OP_CONFIG_RSS_KEY:
2973 err = ice_vc_config_rss_key(vf, msg);
2974 break;
2975 case VIRTCHNL_OP_CONFIG_RSS_LUT:
2976 err = ice_vc_config_rss_lut(vf, msg);
2977 break;
2978 case VIRTCHNL_OP_GET_STATS:
2979 err = ice_vc_get_stats_msg(vf, msg);
2980 break;
2981 case VIRTCHNL_OP_ADD_VLAN:
2982 err = ice_vc_add_vlan_msg(vf, msg);
2983 break;
2984 case VIRTCHNL_OP_DEL_VLAN:
2985 err = ice_vc_remove_vlan_msg(vf, msg);
2986 break;
2987 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2988 err = ice_vc_ena_vlan_stripping(vf);
2989 break;
2990 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2991 err = ice_vc_dis_vlan_stripping(vf);
2992 break;
2993 case VIRTCHNL_OP_UNKNOWN:
2994 default:
2995 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2996 v_opcode, vf_id);
2997 err = ice_vc_send_msg_to_vf(vf, v_opcode,
2998 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
2999 NULL, 0);
3000 break;
3001 }
3002 if (err) {
		/* Errors from the opcode handlers above are not fatal to the
		 * PF; log them and keep servicing the mailbox
		 */
3006 dev_info(&pf->pdev->dev,
3007 "PF failed to honor VF %d, opcode %d, error %d\n",
3008 vf_id, v_opcode, err);
3009 }
3010}
3011
3012/**
 * ice_get_vf_cfg - return the VF configuration
3014 * @netdev: network interface device structure
3015 * @vf_id: VF identifier
3016 * @ivi: VF configuration structure
3017 *
3018 * return VF configuration
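 *
 * Backs the ndo_get_vf_config callback, i.e. the per-VF details shown
 * by "ip link show <pf>"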
3019 */
3020int
3021ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3022{
3023 struct ice_netdev_priv *np = netdev_priv(netdev);
3024 struct ice_vsi *vsi = np->vsi;
3025 struct ice_pf *pf = vsi->back;
3026 struct ice_vf *vf;
3027
3028 /* validate the request */
3029 if (vf_id >= pf->num_alloc_vfs) {
3030 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3031 return -EINVAL;
3032 }
3033
3034 vf = &pf->vf[vf_id];
3035 vsi = pf->vsi[vf->lan_vsi_idx];
3036
3037 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3038 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3039 return -EBUSY;
3040 }
3041
3042 ivi->vf = vf_id;
3043 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3044
3045 /* VF configuration for VLAN and applicable QoS */
3046 ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
3047 ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
3048 ICE_VLAN_PRIORITY_S;
3049
3050 ivi->trusted = vf->trusted;
3051 ivi->spoofchk = vf->spoofchk;
3052 if (!vf->link_forced)
3053 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3054 else if (vf->link_up)
3055 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3056 else
3057 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3058 ivi->max_tx_rate = vf->tx_rate;
3059 ivi->min_tx_rate = 0;
3060 return 0;
3061}
3062
3063/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
3065 * @netdev: network interface device structure
3066 * @vf_id: VF identifier
3067 * @ena: flag to enable or disable feature
3068 *
3069 * Enable or disable VF spoof checking
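 *
 * Backs the ndo_set_vf_spoofchk callback, e.g.
 * "ip link set <pf> vf <id> spoofchk on|off"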
3070 */
3071int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
3072{
3073 struct ice_netdev_priv *np = netdev_priv(netdev);
3074 struct ice_vsi *vsi = np->vsi;
3075 struct ice_pf *pf = vsi->back;
3076 struct ice_vsi_ctx *ctx;
3077 enum ice_status status;
3078 struct ice_vf *vf;
3079 int ret = 0;
3080
3081 /* validate the request */
3082 if (vf_id >= pf->num_alloc_vfs) {
3083 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3084 return -EINVAL;
3085 }
3086
	vf = &pf->vf[vf_id];
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
		return -EBUSY;
	}

	/* operate on the VF's VSI, not the PF VSI the netdev hangs off */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi)
		return -EINVAL;
3092
3093 if (ena == vf->spoofchk) {
3094 dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
3095 ena ? "ON" : "OFF");
3096 return 0;
3097 }
3098
3099 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
3100 if (!ctx)
3101 return -ENOMEM;
3102
3103 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
3104
3105 if (ena) {
3106 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
3107 ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
3108 }
3109
3110 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3111 if (status) {
3112 dev_dbg(&pf->pdev->dev,
			"Error %d, failed to update VSI parameters\n", status);
3114 ret = -EIO;
3115 goto out;
3116 }
3117
3118 vf->spoofchk = ena;
3119 vsi->info.sec_flags = ctx->info.sec_flags;
3120 vsi->info.sw_flags2 = ctx->info.sw_flags2;
3121out:
3122 devm_kfree(&pf->pdev->dev, ctx);
3123 return ret;
3124}
3125
3126/**
 * ice_set_vf_mac - program the VF MAC address
3128 * @netdev: network interface device structure
3129 * @vf_id: VF identifier
3130 * @mac: MAC address
3131 *
3132 * program VF MAC address
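 *
 * Backs the ndo_set_vf_mac callback, e.g.
 * "ip link set <pf> vf <id> mac <addr>"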
3133 */
3134int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3135{
3136 struct ice_netdev_priv *np = netdev_priv(netdev);
3137 struct ice_vsi *vsi = np->vsi;
3138 struct ice_pf *pf = vsi->back;
3139 struct ice_vf *vf;
3140 int ret = 0;
3141
3142 /* validate the request */
3143 if (vf_id >= pf->num_alloc_vfs) {
3144 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
3145 return -EINVAL;
3146 }
3147
3148 vf = &pf->vf[vf_id];
3149 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3150 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
3151 return -EBUSY;
3152 }
3153
3154 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3155 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3156 return -EINVAL;
3157 }
3158
3159 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
3160 * flow will use the updated dflt_lan_addr and add a MAC filter
3161 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
3162 * set the MAC address for this VF.
3163 */
3164 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3165 vf->pf_set_mac = true;
3166 netdev_info(netdev,
3167 "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
3168 vf_id, mac);
3169
3170 ice_vc_dis_vf(vf);
3171 return ret;
3172}
3173
3174/**
 * ice_set_vf_trust - enable or disable VF trust
3176 * @netdev: network interface device structure
3177 * @vf_id: VF identifier
3178 * @trusted: Boolean value to enable/disable trusted VF
3179 *
3180 * Enable or disable a given VF as trusted
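 *
 * Backs the ndo_set_vf_trust callback, e.g.
 * "ip link set <pf> vf <id> trust on|off"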
3181 */
3182int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3183{
3184 struct ice_netdev_priv *np = netdev_priv(netdev);
3185 struct ice_vsi *vsi = np->vsi;
3186 struct ice_pf *pf = vsi->back;
3187 struct ice_vf *vf;
3188
3189 /* validate the request */
3190 if (vf_id >= pf->num_alloc_vfs) {
3191 dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
3192 return -EINVAL;
3193 }
3194
3195 vf = &pf->vf[vf_id];
3196 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3197 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3198 return -EBUSY;
3199 }
3200
3201 /* Check if already trusted */
3202 if (trusted == vf->trusted)
3203 return 0;
3204
3205 vf->trusted = trusted;
3206 ice_vc_dis_vf(vf);
3207 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
3208 vf_id, trusted ? "" : "un");
3209
3210 return 0;
3211}
3212
3213/**
 * ice_set_vf_link_state - set the VF link state
3215 * @netdev: network interface device structure
3216 * @vf_id: VF identifier
3217 * @link_state: required link state
3218 *
3219 * Set VF's link state, irrespective of physical link state status
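 *
 * Backs the ndo_set_vf_link_state callback, e.g.
 * "ip link set <pf> vf <id> state auto|enable|disable"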
3220 */
3221int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3222{
3223 struct ice_netdev_priv *np = netdev_priv(netdev);
3224 struct ice_pf *pf = np->vsi->back;
3225 struct virtchnl_pf_event pfe = { 0 };
3226 struct ice_link_status *ls;
3227 struct ice_vf *vf;
3228 struct ice_hw *hw;
3229
3230 if (vf_id >= pf->num_alloc_vfs) {
3231 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3232 return -EINVAL;
3233 }
3234
3235 vf = &pf->vf[vf_id];
3236 hw = &pf->hw;
3237 ls = &pf->hw.port_info->phy.link_info;
3238
3239 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3241 return -EBUSY;
3242 }
3243
3244 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3245 pfe.severity = PF_EVENT_SEVERITY_INFO;
3246
3247 switch (link_state) {
3248 case IFLA_VF_LINK_STATE_AUTO:
3249 vf->link_forced = false;
3250 vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
3251 break;
3252 case IFLA_VF_LINK_STATE_ENABLE:
3253 vf->link_forced = true;
3254 vf->link_up = true;
3255 break;
3256 case IFLA_VF_LINK_STATE_DISABLE:
3257 vf->link_forced = true;
3258 vf->link_up = false;
3259 break;
3260 default:
3261 return -EINVAL;
3262 }
3263
3264 if (vf->link_forced)
3265 ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
3266 else
3267 ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
3268
3269 /* Notify the VF of its new link state */
3270 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
3271 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
3272 sizeof(pfe), NULL);
3273
3274 return 0;
3275}