// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "vf.h"
#include "ixgbevf.h"

/* On Hyper-V, to reset, we need to read from this offset
 * from the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET 0x201

static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
                                             u32 *retmsg, u16 size)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        s32 retval = mbx->ops.write_posted(hw, msg, size);

        if (retval)
                return retval;

        return mbx->ops.read_posted(hw, retmsg, size);
}

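/* Illustrative sketch (not part of the driver): every mailbox command below
 * follows the same round trip - post the request, read the reply back into
 * the same buffer, clear the CTS bit and compare what is left against the
 * command ORed with ACK or NACK.  The helper name and the -EIO mapping are
 * assumptions made purely for illustration.
 */
static inline s32 ixgbevf_example_send_cmd(struct ixgbe_hw *hw, u32 *msgbuf,
                                           u16 size, u32 cmd)
{
        s32 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, size);

        if (err)
                return err;

        /* strip the clear-to-send bit before checking the reply type */
        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

        if (msgbuf[0] == (cmd | IXGBE_VT_MSGTYPE_NACK))
                return -EIO;    /* PF refused the request */

        return 0;
}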
/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;

        return 0;
}

/**
 * ixgbevf_init_hw_vf - virtual function hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware and then starting
 * the hardware
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
        s32 status = hw->mac.ops.start_hw(hw);

        hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

        return status;
}

/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        u32 timeout = IXGBE_VF_INIT_TIMEOUT;
        s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
        u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
        u8 *addr = (u8 *)(&msgbuf[1]);

        /* Call adapter stop to disable tx/rx and clear interrupts */
        hw->mac.ops.stop_adapter(hw);

        /* reset the api version */
        hw->api_version = ixgbe_mbox_api_10;

        IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
        IXGBE_WRITE_FLUSH(hw);

        /* we cannot reset while the RSTI / RSTD bits are asserted */
        while (!mbx->ops.check_for_rst(hw) && timeout) {
                timeout--;
                udelay(5);
        }

        if (!timeout)
                return IXGBE_ERR_RESET_FAILED;

        /* mailbox timeout can now become active */
        mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

        msgbuf[0] = IXGBE_VF_RESET;
        mbx->ops.write_posted(hw, msgbuf, 1);

        mdelay(10);

        /* set our "perm_addr" based on info provided by PF
         * also set up the mc_filter_type which is piggybacked
         * on the mac address in word 3
         */
        ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
        if (ret_val)
                return ret_val;

        /* New versions of the PF may NACK the reset return message
         * to indicate that no MAC address has yet been assigned for
         * the VF.
         */
        if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
            msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
                return IXGBE_ERR_INVALID_MAC_ADDR;

        if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
                ether_addr_copy(hw->mac.perm_addr, addr);

        hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

        return 0;
}

/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
        struct ixgbevf_adapter *adapter = hw->back;
        int i;

        for (i = 0; i < 6; i++)
                pci_read_config_byte(adapter->pdev,
                                     (i + IXGBE_HV_RESET_OFFSET),
                                     &hw->mac.perm_addr[i]);
        return 0;
#else
        pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
        return -EOPNOTSUPP;
#endif
}

/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
        u32 number_of_queues;
        u32 reg_val;
        u16 i;

        /* Set the adapter_stopped flag so other driver functions stop touching
         * the hardware
         */
        hw->adapter_stopped = true;

        /* Disable the receive unit by stopping each queue */
        number_of_queues = hw->mac.max_rx_queues;
        for (i = 0; i < number_of_queues; i++) {
                reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                if (reg_val & IXGBE_RXDCTL_ENABLE) {
                        reg_val &= ~IXGBE_RXDCTL_ENABLE;
                        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
                }
        }

        IXGBE_WRITE_FLUSH(hw);

        /* Clear interrupt mask to stop interrupts from being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

        /* Clear any pending interrupts */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);

        /* Disable the transmit unit. Each queue must be disabled. */
        number_of_queues = hw->mac.max_tx_queues;
        for (i = 0; i < number_of_queues; i++) {
                reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                if (reg_val & IXGBE_TXDCTL_ENABLE) {
                        reg_val &= ~IXGBE_TXDCTL_ENABLE;
                        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
                }
        }

        return 0;
}

/**
 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits from a multicast address that determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits of
 * incoming Rx multicast addresses to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
        u32 vector = 0;

        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
                vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
                vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
                vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
                break;
        }

        /* vector can only be 12-bits or boundary will be exceeded */
        vector &= 0xFFF;
        return vector;
}

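/* Worked example (illustrative only, not used by the driver): with the
 * default filter type 0, the all-hosts group 01:00:5e:00:00:01 has
 * mc_addr[4] = 0x00 and mc_addr[5] = 0x01, so ixgbevf_mta_vector() returns
 * (0x00 >> 4) | (0x01 << 4) = 0x010, i.e. entry 16 of the multicast table
 * array.  The helper name below is a hypothetical one for illustration.
 */
static inline s32 ixgbevf_example_mta_vector(struct ixgbe_hw *hw)
{
        u8 all_hosts[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        return ixgbevf_mta_vector(hw, all_hosts);       /* 0x010 for type 0 */
}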
/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
        ether_addr_copy(mac_addr, hw->mac.perm_addr);

        return 0;
}

static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
        u32 msgbuf[3], msgbuf_chk;
        u8 *msg_addr = (u8 *)(&msgbuf[1]);
        s32 ret_val;

        memset(msgbuf, 0, sizeof(msgbuf));
        /* If index is one then this is the start of a new list and needs
         * indication to the PF so it can do its own list management.
         * If it is zero then that tells the PF to just clear all of
         * this VF's macvlans and there is no new list.
         */
        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
        msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
        msgbuf_chk = msgbuf[0];

        if (addr)
                ether_addr_copy(msg_addr, addr);

        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                             ARRAY_SIZE(msgbuf));
        if (!ret_val) {
                msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

                if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
                        return -ENOMEM;
        }

        return ret_val;
}

static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the API doesn't support this
 * operation.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
        int err, i, j;
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
        u32 *hw_reta = &msgbuf[1];
        u32 mask = 0;

        /* We have to use a mailbox for 82599 and x540 devices only.
         * For these devices RETA has 128 entries.
         * Also these VFs support up to 4 RSS queues. Therefore PF will compress
         * 16 RETA entries in each DWORD giving 2 bits to each entry.
         */
        int dwords = IXGBEVF_82599_RETA_SIZE / 16;

        /* We support the RSS querying for 82599 and x540 devices only.
         * Thus return an error if API doesn't support RETA querying or querying
         * is not supported for this device type.
         */
        switch (hw->api_version) {
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_12:
                if (hw->mac.type < ixgbe_mac_X550_vf)
                        break;
                fallthrough;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_GET_RETA;

        err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

        if (err)
                return err;

        err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

        if (err)
                return err;

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

        /* If the operation has been refused by a PF return -EPERM */
        if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
                return -EPERM;

        /* If we didn't get an ACK there must have been
         * some sort of mailbox error so we should treat it
         * as such.
         */
        if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
                return IXGBE_ERR_MBX;

        /* ixgbevf doesn't support more than 2 queues at the moment */
        if (num_rx_queues > 1)
                mask = 0x1;

        for (i = 0; i < dwords; i++)
                for (j = 0; j < 16; j++)
                        reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

        return 0;
}

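/* Packing example (illustrative only): for these devices the PF returns the
 * 128-entry RETA as 128 / 16 = 8 DWORDs with two bits per entry, so entry k
 * sits at bit offset 2 * (k % 16) inside hw_reta[k / 16], which is exactly
 * what the unpacking loop above computes before clamping the value with
 * "mask" to the number of Rx queues in use.  The helper name is hypothetical.
 */
static inline u32 ixgbevf_example_reta_entry(const u32 *hw_reta, int k)
{
        return (hw_reta[k / 16] >> (2 * (k % 16))) & 0x3;
}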
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the API doesn't support this
 * operation.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
        int err;
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

        /* We currently support the RSS Random Key retrieval for 82599 and x540
         * devices only.
         *
         * Thus return an error if API doesn't support RSS Random Key retrieval
         * or if the operation is not supported for this device type.
         */
        switch (hw->api_version) {
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_12:
                if (hw->mac.type < ixgbe_mac_X550_vf)
                        break;
                fallthrough;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
        err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

        if (err)
                return err;

        err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

        if (err)
                return err;

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

        /* If the operation has been refused by a PF return -EPERM */
        if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
                return -EPERM;

        /* If we didn't get an ACK there must have been
         * some sort of mailbox error so we should treat it
         * as such.
         */
        if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
                return IXGBE_ERR_MBX;

        memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

        return 0;
}

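/* Sizing note (illustrative only): assuming the 40-byte RSS key implied by
 * the "10 registers" comment above, the key occupies
 * IXGBEVF_RSS_HASH_KEY_SIZE / 4 = 10 mailbox DWORDs, which is why the reply
 * above is read as 1 command word + 10 key words = 11 DWORDs.  The helper
 * name is hypothetical.
 */
static inline u16 ixgbevf_example_rss_key_msg_len(void)
{
        return 1 + IXGBEVF_RSS_HASH_KEY_SIZE / 4;       /* == 11 */
}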
/**
 * ixgbevf_set_rar_vf - set device MAC address
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
                              u32 vmdq)
{
        u32 msgbuf[3];
        u8 *msg_addr = (u8 *)(&msgbuf[1]);
        s32 ret_val;

        memset(msgbuf, 0, sizeof(msgbuf));
        msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
        ether_addr_copy(msg_addr, addr);

        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                             ARRAY_SIZE(msgbuf));
        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

        /* if nacked the address was rejected, use "perm_addr" */
        if (!ret_val &&
            (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
                ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
                return IXGBE_ERR_MBX;
        }

        return ret_val;
}

/**
 * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 *
 * We don't really allow setting the device MAC address. However,
 * if the address being set is the permanent MAC address we will
 * permit that.
 **/
static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
                                 u32 vmdq)
{
        if (ether_addr_equal(addr, hw->mac.perm_addr))
                return 0;

        return -EOPNOTSUPP;
}

/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
                                          struct net_device *netdev)
{
        struct netdev_hw_addr *ha;
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
        u16 *vector_list = (u16 *)&msgbuf[1];
        u32 cnt, i;

        /* Each entry in the list uses 1 16 bit word. We have 30
         * 16 bit words available in our HW msg buffer (minus 1 for the
         * msg type). That's 30 hash values if we pack 'em right. If
         * there are more than 30 MC addresses to add then punt the
         * extras for now and then add code to handle more than 30 later.
         * It would be unusual for a server to request that many multicast
         * addresses except in large enterprise network environments.
         */

        cnt = netdev_mc_count(netdev);
        if (cnt > 30)
                cnt = 30;
        msgbuf[0] = IXGBE_VF_SET_MULTICAST;
        msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

        i = 0;
        netdev_for_each_mc_addr(ha, netdev) {
                if (i == cnt)
                        break;
                if (is_link_local_ether_addr(ha->addr))
                        continue;

                vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
        }

        return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                          IXGBE_VFMAILBOX_SIZE);
}

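/* Capacity example (illustrative only): the mailbox is IXGBE_VFMAILBOX_SIZE
 * DWORDs.  One DWORD carries the message type and entry count, so each
 * remaining DWORD holds two 16-bit hash vectors, which is where the limit of
 * 30 multicast entries above comes from.  The helper name is hypothetical.
 */
static inline u32 ixgbevf_example_max_mc_entries(void)
{
        return (IXGBE_VFMAILBOX_SIZE - 1) * 2;  /* 30 on these devices */
}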
/**
 * ixgbevf_hv_update_mc_addr_list_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @netdev: unused
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
                                             struct net_device *netdev)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
        u32 msgbuf[2];
        s32 err;

        switch (hw->api_version) {
        case ixgbe_mbox_api_12:
                /* promisc introduced in 1.3 version */
                if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
                        return -EOPNOTSUPP;
                fallthrough;
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_13:
                break;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
        msgbuf[1] = xcast_mode;

        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                         ARRAY_SIZE(msgbuf));
        if (err)
                return err;

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
        if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
                return -EPERM;

        return 0;
}

/**
 * ixgbevf_hv_update_xcast_mode - Hyper-V variant - just a stub
 * @hw: unused
 * @xcast_mode: unused
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table array
 * @hw: pointer to the HW structure
 * @vlan: 12 bit VLAN ID
 * @vind: unused by VF drivers
 * @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                               bool vlan_on)
{
        u32 msgbuf[2];
        s32 err;

        msgbuf[0] = IXGBE_VF_SET_VLAN;
        msgbuf[1] = vlan;
        /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
        msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                         ARRAY_SIZE(msgbuf));
        if (err)
                goto mbx_err;

        /* remove extra bits from the message */
        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
        msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

        if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
                err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
        return err;
}

/**
 * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                                  bool vlan_on)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success. VF drivers are not allowed to change
 * global settings. Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed, bool autoneg,
                                     bool autoneg_wait_to_complete)
{
        return 0;
}

/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
                                     ixgbe_link_speed *speed,
                                     bool *link_up,
                                     bool autoneg_wait_to_complete)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        struct ixgbe_mac_info *mac = &hw->mac;
        s32 ret_val = 0;
        u32 links_reg;
        u32 in_msg = 0;

        /* If we were hit with a reset drop the link */
        if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
                mac->get_link_status = true;

        if (!mac->get_link_status)
                goto out;

        /* if link status is down no point in checking to see if pf is up */
        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
        if (!(links_reg & IXGBE_LINKS_UP))
                goto out;

        /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
         * before the link status is correct
         */
        if (mac->type == ixgbe_mac_82599_vf) {
                int i;

                for (i = 0; i < 5; i++) {
                        udelay(100);
                        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

                        if (!(links_reg & IXGBE_LINKS_UP))
                                goto out;
                }
        }

        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
        case IXGBE_LINKS_SPEED_10G_82599:
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_1G_82599:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_100_82599:
                *speed = IXGBE_LINK_SPEED_100_FULL;
                break;
        }

        /* if the read failed it could just be a mailbox collision, best wait
         * until we are called again and don't report an error
         */
        if (mbx->ops.read(hw, &in_msg, 1))
                goto out;

        if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
                /* if msg is not CTS but is a NACK we must have lost CTS status */
                if (in_msg & IXGBE_VT_MSGTYPE_NACK)
                        ret_val = -1;
                goto out;
        }

        /* the PF is talking, if we timed out in the past we reinit */
        if (!mbx->timeout) {
                ret_val = -1;
                goto out;
        }

        /* if we passed all the tests above then the link is up and we no
         * longer need to check for link
         */
        mac->get_link_status = false;

out:
        *link_up = !mac->get_link_status;
        return ret_val;
}

/**
 * ixgbevf_hv_check_mac_link_vf - Get link/speed status, Hyper-V variant
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
                                        ixgbe_link_speed *speed,
                                        bool *link_up,
                                        bool autoneg_wait_to_complete)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        struct ixgbe_mac_info *mac = &hw->mac;
        u32 links_reg;

        /* If we were hit with a reset drop the link */
        if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
                mac->get_link_status = true;

        if (!mac->get_link_status)
                goto out;

        /* if link status is down no point in checking to see if pf is up */
        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
        if (!(links_reg & IXGBE_LINKS_UP))
                goto out;

        /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
         * before the link status is correct
         */
        if (mac->type == ixgbe_mac_82599_vf) {
                int i;

                for (i = 0; i < 5; i++) {
                        udelay(100);
                        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

                        if (!(links_reg & IXGBE_LINKS_UP))
                                goto out;
                }
        }

        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
        case IXGBE_LINKS_SPEED_10G_82599:
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_1G_82599:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_100_82599:
                *speed = IXGBE_LINK_SPEED_100_FULL;
                break;
        }

        /* if we passed all the tests above then the link is up and we no
         * longer need to check for link
         */
        mac->get_link_status = false;

out:
        *link_up = !mac->get_link_status;
        return 0;
}

/**
 * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
        u32 msgbuf[2];
        s32 ret_val;

        msgbuf[0] = IXGBE_VF_SET_LPE;
        msgbuf[1] = max_size;

        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                             ARRAY_SIZE(msgbuf));
        if (ret_val)
                return ret_val;
        if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
            (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
                return IXGBE_ERR_MBX;

        return 0;
}

/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 *
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
        u32 reg;

        /* If we are on Hyper-V, we implement this functionality
         * differently.
         */
        reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
        /* CRC == 4 */
        reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

        return 0;
}

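/* Worked example (illustrative only): the Hyper-V path above programs the
 * RLPML field of VFRXDCTL(0) directly, adding 4 bytes for the CRC, so a
 * max_size of 1522 programs a limit of 1526.  The helper name is
 * hypothetical; it only repeats the register arithmetic.
 */
static inline u32 ixgbevf_example_hv_rlpml(u32 old_rxdctl, u16 max_size)
{
        return old_rxdctl | (max_size + 4) | IXGBE_RXDCTL_RLPML_EN;
}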
/**
 * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 **/
static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
        int err;
        u32 msg[3];

        /* Negotiate the mailbox API version */
        msg[0] = IXGBE_VF_API_NEGOTIATE;
        msg[1] = api;
        msg[2] = 0;

        err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
        if (!err) {
                msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

                /* Store value and return 0 on success */
                if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
                        hw->api_version = api;
                        return 0;
                }

                err = IXGBE_ERR_INVALID_ARGUMENT;
        }

        return err;
}

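/* Usage sketch (an assumption about the caller, not a copy of the driver's
 * probe path): a caller would typically offer mailbox API versions from
 * newest to oldest until the PF acknowledges one, falling back to
 * ixgbe_mbox_api_10.  The helper name is hypothetical.
 */
static inline void ixgbevf_example_negotiate(struct ixgbe_hw *hw)
{
        const int api[] = {
                ixgbe_mbox_api_14,
                ixgbe_mbox_api_13,
                ixgbe_mbox_api_12,
                ixgbe_mbox_api_11,
                ixgbe_mbox_api_10,
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(api); i++)
                if (!hw->mac.ops.negotiate_api_version(hw, api[i]))
                        break;
}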
/**
 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 *
 * Hyper-V version - only ixgbe_mbox_api_10 supported.
 **/
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
        /* Hyper-V only supports api version ixgbe_mbox_api_10 */
        if (api != ixgbe_mbox_api_10)
                return IXGBE_ERR_INVALID_ARGUMENT;

        return 0;
}

int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
                       unsigned int *default_tc)
{
        int err;
        u32 msg[5];

        /* do nothing if API doesn't support ixgbevf_get_queues */
        switch (hw->api_version) {
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_14:
                break;
        default:
                return 0;
        }

        /* Fetch queue configuration from the PF */
        msg[0] = IXGBE_VF_GET_QUEUE;
        msg[1] = msg[2] = msg[3] = msg[4] = 0;

        err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
        if (!err) {
                msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

                /* if we didn't get an ACK there must have been
                 * some sort of mailbox error so we should treat it
                 * as such
                 */
                if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
                        return IXGBE_ERR_MBX;

                /* record and validate values from message */
                hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
                if (hw->mac.max_tx_queues == 0 ||
                    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
                        hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

                hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
                if (hw->mac.max_rx_queues == 0 ||
                    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
                        hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

                *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
                /* in case of unknown state assume we cannot tag frames */
                if (*num_tcs > hw->mac.max_rx_queues)
                        *num_tcs = 1;

                *default_tc = msg[IXGBE_VF_DEF_QUEUE];
                /* default to queue 0 on out-of-bounds queue number */
                if (*default_tc >= hw->mac.max_tx_queues)
                        *default_tc = 0;
        }

        return err;
}

static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .init_hw = ixgbevf_init_hw_vf,
        .reset_hw = ixgbevf_reset_hw_vf,
        .start_hw = ixgbevf_start_hw_vf,
        .get_mac_addr = ixgbevf_get_mac_addr_vf,
        .stop_adapter = ixgbevf_stop_hw_vf,
        .setup_link = ixgbevf_setup_mac_link_vf,
        .check_link = ixgbevf_check_mac_link_vf,
        .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
        .set_rar = ixgbevf_set_rar_vf,
        .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
        .update_xcast_mode = ixgbevf_update_xcast_mode,
        .set_uc_addr = ixgbevf_set_uc_addr_vf,
        .set_vfta = ixgbevf_set_vfta_vf,
        .set_rlpml = ixgbevf_set_rlpml_vf,
};

static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
        .init_hw = ixgbevf_init_hw_vf,
        .reset_hw = ixgbevf_hv_reset_hw_vf,
        .start_hw = ixgbevf_start_hw_vf,
        .get_mac_addr = ixgbevf_get_mac_addr_vf,
        .stop_adapter = ixgbevf_stop_hw_vf,
        .setup_link = ixgbevf_setup_mac_link_vf,
        .check_link = ixgbevf_hv_check_mac_link_vf,
        .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
        .set_rar = ixgbevf_hv_set_rar_vf,
        .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
        .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
        .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
        .set_vfta = ixgbevf_hv_set_vfta_vf,
        .set_rlpml = ixgbevf_hv_set_rlpml_vf,
};

const struct ixgbevf_info ixgbevf_82599_vf_info = {
        .mac = ixgbe_mac_82599_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
        .mac = ixgbe_mac_82599_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
        .mac = ixgbe_mac_X540_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
        .mac = ixgbe_mac_X540_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
        .mac = ixgbe_mac_X550_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
        .mac = ixgbe_mac_X550_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
        .mac = ixgbe_mac_X550EM_x_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
        .mac = ixgbe_mac_X550EM_x_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
        .mac = ixgbe_mac_x550em_a_vf,
        .mac_ops = &ixgbevf_mac_ops,
};