// SPDX-License-Identifier: GPL-2.0
/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

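/**
 * fm10k_iov_msg_error - Message handler for unknown messages from a VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * Log the unknown message ID and the offending VF index, then hand the
 * message off to the generic TLV error handler.
 **/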
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

/**
 * fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a custom handler for MAC/VLAN requests from the VF. The
 * assumption is that it is acceptable to directly hand off the message from
 * the VF to the PF's switch manager. However, we use a MAC/VLAN message
 * queue to avoid overloading the mailbox when a large number of requests
 * come in.
 **/
static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
					struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* if the length field has been set, this is a multi-bit
		 * update request. For multi-bit requests, simply disallow
		 * them when the pf_vid has been set. In this case, the PF
		 * should have already cleared the VLAN_TABLE, and if we
		 * allowed them, it could allow a rogue VF to receive traffic
		 * on a VLAN it was not assigned. In the single-bit case, we
		 * need to modify requests for VLAN 0 to use the default PF or
		 * SW vid when assigned.
		 */

		if (vid >> 16) {
			/* prevent multi-bit requests when PF has
			 * administratively set the VLAN for this VF
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

			vid = err;
		}

		/* update VSI info for VF with regard to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	return err;
}

static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

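/**
 * fm10k_iov_event - Handle VF reset events reported by the hardware
 * @interface: board private structure
 *
 * If the VFLR interrupt cause is set, read the PFVFLRE registers and, for
 * each VF that has been reset, reclaim its hardware resources and reconnect
 * its mailbox.
 **/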
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
	vflre <<= 32;
	vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

	i = iov_data->num_vfs;

	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		if (vflre >= 0)
			continue;

		hw->iov.ops.reset_resources(hw, vf_info);
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}

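/**
 * fm10k_iov_mbx - Process the per-VF mailboxes
 * @interface: board private structure
 *
 * Walk the VF mailboxes, resetting any VF whose port mapping is invalid or
 * whose mailbox has timed out, and processing received messages. If the SM
 * mailbox runs out of space, record where we stopped so the next run resumes
 * there and no VF is starved.
 **/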
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
			hw->iov.ops.reset_lport(hw, vf_info);
			fm10k_clear_macvlan_queue(interface, glort, false);
		}

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

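/**
 * fm10k_iov_suspend - Stop all VFs and reclaim their hardware resources
 * @pdev: Pointer to PCI device information struct
 *
 * Disable the VF RSS queue mapping, then reset the resources, logical port,
 * and queued MAC/VLAN requests of every active VF.
 **/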
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
		fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
	}
}

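/**
 * fm10k_iov_resume - Restore hardware resources for all VFs
 * @pdev: Pointer to PCI device information struct
 *
 * Reassign queue resources, reprogram the DGLORT mapping used for VF RSS,
 * and bring each VF's logical port and mailbox back online.
 *
 * Returns 0 on success, or -ENOMEM if the SR-IOV data has not been
 * allocated.
 **/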
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

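/**
 * fm10k_iov_update_pvid - Record a switch-assigned default VLAN for a VF
 * @interface: board private structure
 * @glort: glort identifying the VF
 * @pvid: port VLAN ID reported by the switch manager
 *
 * Map the glort back to a VF index and, if the VF's switch VLAN has changed,
 * store it and push an updated default MAC/VLAN to the VF.
 **/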
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

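/**
 * fm10k_iov_free_data - Release SR-IOV data for the interface
 * @pdev: Pointer to PCI device information struct
 *
 * Suspend the VFs to reclaim hardware resources, then free the iov_data
 * structure via RCU.
 **/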
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

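/**
 * fm10k_iov_alloc_data - Allocate and initialize SR-IOV data
 * @pdev: Pointer to PCI device information struct
 * @num_vfs: number of VFs requested
 *
 * Allocate the iov_data structure, assign a VSI and mailbox to each VF, and
 * resume the VFs so hardware resources are assigned.
 **/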
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

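/**
 * fm10k_iov_disable - Disable SR-IOV on the device
 * @pdev: Pointer to PCI device information struct
 *
 * Disable SR-IOV unless VFs are currently assigned to guests; in either case
 * release the driver's SR-IOV data.
 **/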
void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

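/**
 * fm10k_disable_aer_comp_abort - Stop treating completer aborts as fatal
 * @pdev: Pointer to PCI device information struct
 *
 * Clear the completer abort bit in the AER uncorrectable error severity
 * register, since VFs can trigger completer aborts any time they read a
 * queue they do not own.
 **/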
static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_sev;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}

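/**
 * fm10k_iov_configure - Callback to enable or disable VFs
 * @pdev: Pointer to PCI device information struct
 * @num_vfs: number of VFs requested
 *
 * Tear down any existing VFs that are not assigned to guests, allocate new
 * SR-IOV data, and enable the requested number of VFs. Returns the number of
 * VFs now enabled, or a negative error code.
 **/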
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && num_vfs != current_vfs) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

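/**
 * fm10k_reset_vf_info - Reset a VF's logical port after a configuration change
 * @interface: board private structure
 * @vf_info: pointer to the VF information structure
 *
 * Disable the VF's logical port to clear its switch rules and queued MAC/VLAN
 * requests, assign the new default MAC/VLAN, and re-enable the port.
 **/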
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	fm10k_clear_macvlan_queue(interface, vf_info->glort, false);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}

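/**
 * fm10k_ndo_set_vf_mac - ndo callback to set a VF's MAC address
 * @netdev: network interface device structure
 * @vf_idx: VF index
 * @mac: MAC address to assign, or the zero address to clear it
 *
 * Record the administratively assigned MAC address and reset the VF so the
 * new address takes effect.
 **/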
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

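/**
 * fm10k_ndo_set_vf_vlan - ndo callback to set a VF's default VLAN
 * @netdev: network interface device structure
 * @vf_idx: VF index
 * @vid: VLAN ID to assign, or 0 to clear the assignment
 * @qos: priority, which must be zero as QOS is not supported
 * @vlan_proto: VLAN protocol, which must be 802.1Q
 *
 * Record the PF-assigned default VLAN, clear the VF's VLAN table, and reset
 * the VF so the change takes effect.
 **/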
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos, __be16 vlan_proto)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QOS is unsupported and VLAN IDs are only accepted in the range
	 * 0-4094
	 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	/* only the default VLAN protocol (802.1Q) is supported */
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

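/**
 * fm10k_ndo_set_vf_bw - ndo callback to set a VF's Tx rate limit
 * @netdev: network interface device structure
 * @vf_idx: VF index
 * @min_rate: minimum rate, which is not supported and ignored
 * @max_rate: maximum Tx rate, or 0 for unlimited
 *
 * Validate and store the rate limit, then program it into the hardware
 * traffic class for the VF.
 **/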
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
			int __always_unused min_rate, int max_rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mbps or greater than link speed */
	if (max_rate &&
	    (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = max_rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, max_rate);

	return 0;
}

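/**
 * fm10k_ndo_get_vf_config - ndo callback to report a VF's configuration
 * @netdev: network interface device structure
 * @vf_idx: VF index
 * @ivi: VF configuration structure to populate
 *
 * Report the VF's MAC address, default VLAN, and Tx rate limit.
 **/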
int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}