Linux Audio

Check our new training course

Loading...
v4.10.11
   1/*******************************************************************************
   2
   3  Intel 82599 Virtual Function driver
   4  Copyright(c) 1999 - 2015 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, see <http://www.gnu.org/licenses/>.
  17
  18  The full GNU General Public License is included in this distribution in
  19  the file called "COPYING".
  20
  21  Contact Information:
  22  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24
  25*******************************************************************************/
  26
  27#include "vf.h"
  28#include "ixgbevf.h"
  29
  30/* On Hyper-V, to reset, we need to read from this offset
  31 * from the PCI config space. This is the mechanism used on
  32 * Hyper-V to support PF/VF communication.
  33 */
  34#define IXGBE_HV_RESET_OFFSET           0x201
  35
  36static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
  37					     u32 *retmsg, u16 size)
  38{
  39	struct ixgbe_mbx_info *mbx = &hw->mbx;
  40	s32 retval = mbx->ops.write_posted(hw, msg, size);
  41
  42	if (retval)
  43		return retval;
  44
  45	return mbx->ops.read_posted(hw, retmsg, size);
  46}
  47
  48/**
  49 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  50 *  @hw: pointer to hardware structure
  51 *
  52 *  Starts the hardware by filling the bus info structure and media type, clears
  53 *  all on chip counters, initializes receive address registers, multicast
  54 *  table, VLAN filter table, calls routine to set up link and flow control
  55 *  settings, and leaves transmit and receive units disabled and uninitialized
  56 **/
  57static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
  58{
  59	/* Clear adapter stopped flag */
  60	hw->adapter_stopped = false;
  61
  62	return 0;
  63}
  64
  65/**
  66 *  ixgbevf_init_hw_vf - virtual function hardware initialization
  67 *  @hw: pointer to hardware structure
  68 *
  69 *  Initialize the hardware by resetting the hardware and then starting
  70 *  the hardware
  71 **/
  72static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  73{
  74	s32 status = hw->mac.ops.start_hw(hw);
  75
  76	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
  77
  78	return status;
  79}
  80
/**
 *  ixgbevf_reset_hw_vf - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts.  On success the permanent MAC address and multicast
 *  filter type provided by the PF are stored in @hw.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	/* word 0 is the message type; the MAC address begins at word 1 */
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	/* give the PF time to process the reset request */
	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* only an ACKed reply carries a valid permanent MAC address */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
 145
 146/**
 147 * Hyper-V variant; the VF/PF communication is through the PCI
 148 * config space.
 
 149 */
 150static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 151{
 152#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
 153	struct ixgbevf_adapter *adapter = hw->back;
 154	int i;
 155
 156	for (i = 0; i < 6; i++)
 157		pci_read_config_byte(adapter->pdev,
 158				     (i + IXGBE_HV_RESET_OFFSET),
 159				     &hw->mac.perm_addr[i]);
 160	return 0;
 161#else
 162	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
 163	return -EOPNOTSUPP;
 164#endif
 165}
 166
/**
 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopped each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			/* clear only the enable bit, preserve other settings */
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	/* make sure the Rx disables have posted before touching interrupts */
	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
 217
 218/**
 219 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 220 *  @hw: pointer to hardware structure
 221 *  @mc_addr: the multicast address
 222 *
 223 *  Extracts the 12 bits, from a multicast address, to determine which
 224 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 225 *  incoming Rx multicast addresses, to determine the bit-vector to check in
 226 *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 227 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 228 *  to mc_filter_type.
 229 **/
 230static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 231{
 232	u32 vector = 0;
 233
 234	switch (hw->mac.mc_filter_type) {
 235	case 0:   /* use bits [47:36] of the address */
 236		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 237		break;
 238	case 1:   /* use bits [46:35] of the address */
 239		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 240		break;
 241	case 2:   /* use bits [45:34] of the address */
 242		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 243		break;
 244	case 3:   /* use bits [43:32] of the address */
 245		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 246		break;
 247	default:  /* Invalid mc_filter_type */
 248		break;
 249	}
 250
 251	/* vector can only be 12-bits or boundary will be exceeded */
 252	vector &= 0xFFF;
 253	return vector;
 254}
 255
 256/**
 257 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 258 *  @hw: pointer to the HW structure
 259 *  @mac_addr: pointer to storage for retrieved MAC address
 260 **/
 261static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 262{
 263	ether_addr_copy(mac_addr, hw->mac.perm_addr);
 264
 265	return 0;
 266}
 267
/* ixgbevf_set_uc_addr_vf - ask the PF to add/clear a secondary unicast addr
 * @hw: pointer to the HW structure
 * @index: position in the VF's macvlan list (0 means "clear the list")
 * @addr: address to add, or NULL when only clearing
 *
 * Returns 0 on success, -ENOMEM when the PF NACKs the request, or a
 * mailbox error code.
 */
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	u32 msgbuf[3], msgbuf_chk;
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do it's own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	/* remember the request word so the reply can be matched against it */
	msgbuf_chk = msgbuf[0];

	if (addr)
		ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     sizeof(msgbuf) / sizeof(u32));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* a NACK means the PF could not honor the request */
		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
			return -ENOMEM;
	}

	return ret_val;
}
 298
 299static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 300{
 301	return -EOPNOTSUPP;
 302}
 303
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to the port handle
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Caller is expected to hold the mailbox lock (hence "_locked").
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	if (hw->api_version != ixgbe_mbox_api_12 ||
	    hw->mac.type >= ixgbe_mac_X550_vf)
		return -EOPNOTSUPP;

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the request word followed by the compressed RETA */
	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* unpack 16 two-bit entries from each DWORD into the caller's table */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
 372
 373/**
 374 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 375 * @hw: pointer to the HW structure
 376 * @rss_key: buffer to fill with RSS Hash Key contents.
 377 *
 378 * The "rss_key" buffer should be big enough to contain 10 registers.
 379 *
 380 * Returns: 0 on success.
 381 *          if API doesn't support this operation - (-EOPNOTSUPP).
 382 */
 383int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 384{
 385	int err;
 386	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 387
 388	/* We currently support the RSS Random Key retrieval for 82599 and x540
 389	 * devices only.
 390	 *
 391	 * Thus return an error if API doesn't support RSS Random Key retrieval
 392	 * or if the operation is not supported for this device type.
 393	 */
 394	if (hw->api_version != ixgbe_mbox_api_12 ||
 395	    hw->mac.type >= ixgbe_mac_X550_vf)
 
 
 
 
 
 
 396		return -EOPNOTSUPP;
 
 397
 398	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
 399	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
 400
 401	if (err)
 402		return err;
 403
 404	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
 405
 406	if (err)
 407		return err;
 408
 409	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 410
 411	/* If the operation has been refused by a PF return -EPERM */
 412	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
 413		return -EPERM;
 414
 415	/* If we didn't get an ACK there must have been
 416	 * some sort of mailbox error so we should treat it
 417	 * as such.
 418	 */
 419	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
 420		return IXGBE_ERR_MBX;
 421
 422	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
 423
 424	return 0;
 425}
 426
/**
 *  ixgbevf_set_rar_vf - set device MAC address
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: Unused in this implementation
 *
 *  Asks the PF to install @addr; if the PF rejects it the previously
 *  known permanent address is restored into hw->mac.addr and
 *  IXGBE_ERR_MBX is returned.
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     sizeof(msgbuf) / sizeof(u32));

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
		return IXGBE_ERR_MBX;
	}

	return ret_val;
}
 459
 460/**
 461 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 462 *  @hw: pointer to hardware structure
 463 *  @index: Receive address register to write
 464 *  @addr: Address to put into receive address register
 465 *  @vmdq: Unused in this implementation
 466 *
 467 * We don't really allow setting the device MAC address. However,
 468 * if the address being set is the permanent MAC address we will
 469 * permit that.
 470 **/
 471static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 472				 u32 vmdq)
 473{
 474	if (ether_addr_equal(addr, hw->mac.perm_addr))
 475		return 0;
 476
 477	return -EOPNOTSUPP;
 478}
 479
/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word.  We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		/* NOTE(review): link-local entries are skipped here but are
		 * still counted in cnt above, so the count advertised to the
		 * PF can exceed the vectors actually written - confirm the
		 * PF tolerates trailing zero entries.
		 */
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	/* best-effort: the PF reply is read but not checked here */
	ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}
 524
 525/**
 526 * Hyper-V variant - just a stub.
 
 
 527 */
 528static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 529					     struct net_device *netdev)
 530{
 531	return -EOPNOTSUPP;
 532}
 533
/**
 *  ixgbevf_update_xcast_mode - Update Multicast mode
 *  @hw: pointer to the HW structure
 *  @xcast_mode: new multicast mode
 *
 *  Updates the Multicast Mode of VF.  Returns 0 on success, -EOPNOTSUPP
 *  when the negotiated mailbox API is too old, -EPERM when the PF NACKs
 *  the request, or a mailbox error code.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	/* only mailbox API 1.2 supports the xcast-mode message */
	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 sizeof(msgbuf) / sizeof(u32));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	/* a NACK means the PF refused the new mode */
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
 567
 568/**
 569 * Hyper-V variant - just a stub.
 
 
 570 */
 571static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 572{
 573	return -EOPNOTSUPP;
 574}
 575
/**
 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 *  @hw: pointer to the HW structure
 *  @vlan: 12 bit VLAN ID
 *  @vind: unused by VF drivers
 *  @vlan_on: if true then set bit, else clear bit
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT when the PF does not
 *  ACK the request, or a mailbox error code.
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 sizeof(msgbuf) / sizeof(u32));
	if (err)
		goto mbx_err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
	return err;
}
 609
 610/**
 611 * Hyper-V variant - just a stub.
 
 
 
 
 612 */
 613static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 614				  bool vlan_on)
 615{
 616	return -EOPNOTSUPP;
 617}
 618
 619/**
 620 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 621 *  @hw: pointer to hardware structure
 622 *  @speed: Unused in this implementation
 623 *  @autoneg: Unused in this implementation
 624 *  @autoneg_wait_to_complete: Unused in this implementation
 625 *
 626 *  Do nothing and return success.  VF drivers are not allowed to change
 627 *  global settings.  Maintained for driver compatibility.
 628 **/
 629static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
 630				     ixgbe_link_speed speed, bool autoneg,
 631				     bool autoneg_wait_to_complete)
 632{
 633	return 0;
 634}
 635
/**
 *  ixgbevf_check_mac_link_vf - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true is link is up, false otherwise
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Reads the links register to determine if link is up and the current speed,
 *  then probes the mailbox to confirm the PF is still responsive; a lost or
 *  NACKed mailbox forces the link to be reported down (ret_val -1).
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* nothing to do if a previous pass already confirmed link */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		/* re-read a few times; bail out if link drops meanwhile */
		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* translate the hardware speed field into a driver speed constant */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
 723
/**
 * Hyper-V variant; there is no mailbox communication, so the link state
 * is taken purely from the VFLINKS register and no PF liveness probe is
 * performed.  Always returns 0.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* nothing to do if a previous pass already confirmed link */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		/* re-read a few times; bail out if link drops meanwhile */
		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* translate the hardware speed field into a driver speed constant */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
 784
 785/**
 786 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 787 *  @hw: pointer to the HW structure
 788 *  @max_size: value to assign to max frame size
 789 **/
 790static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 791{
 792	u32 msgbuf[2];
 793	s32 ret_val;
 794
 795	msgbuf[0] = IXGBE_VF_SET_LPE;
 796	msgbuf[1] = max_size;
 797
 798	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 799					     sizeof(msgbuf) / sizeof(u32));
 800	if (ret_val)
 801		return ret_val;
 802	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
 803	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
 804		return IXGBE_ERR_MBX;
 805
 806	return 0;
 807}
 808
/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant: programs the limit directly into the first Rx queue's
 * RXDCTL register instead of going through the (absent) PF mailbox.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* CRC == 4: the limit must also cover the 4-byte frame CRC */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}
 829
 830/**
 831 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 832 *  @hw: pointer to the HW structure
 833 *  @api: integer containing requested API version
 834 **/
 835static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 836{
 837	int err;
 838	u32 msg[3];
 839
 840	/* Negotiate the mailbox API version */
 841	msg[0] = IXGBE_VF_API_NEGOTIATE;
 842	msg[1] = api;
 843	msg[2] = 0;
 844
 845	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
 846					 sizeof(msg) / sizeof(u32));
 847	if (!err) {
 848		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 849
 850		/* Store value and return 0 on success */
 851		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
 852			hw->api_version = api;
 853			return 0;
 854		}
 855
 856		err = IXGBE_ERR_INVALID_ARGUMENT;
 857	}
 858
 859	return err;
 860}
 861
 862/**
 863 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 864 *  @hw: pointer to the HW structure
 865 *  @api: integer containing requested API version
 866 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 867 **/
 868static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 869{
 870	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
 871	if (api != ixgbe_mbox_api_10)
 872		return IXGBE_ERR_INVALID_ARGUMENT;
 873
 874	return 0;
 875}
 876
/* ixgbevf_get_queues - fetch Tx/Rx queue limits and TC info from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: out - number of traffic classes (1 when unknown/invalid)
 * @default_tc: out - default queue/TC index (0 on out-of-bounds values)
 *
 * Returns 0 on success or when the negotiated API predates the GET_QUEUE
 * message (callers then keep their defaults), IXGBE_ERR_MBX on a non-ACK
 * reply, or a mailbox error code.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
					 sizeof(msg) / sizeof(u32));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
 932
/* MAC operations for regular (SR-IOV/PCIe) VF devices; all PF
 * communication goes through the hardware mailbox.
 */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
 949
/* MAC operations for Hyper-V hosted VFs; mailbox-dependent callbacks are
 * replaced with Hyper-V variants or stubs that return -EOPNOTSUPP.
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
 966
/* Per-device driver info: pairs each supported VF MAC type with either the
 * mailbox-based ops table (*_vf_info) or the Hyper-V table (*_vf_hv_info).
 */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
   3
   4#include "vf.h"
   5#include "ixgbevf.h"
   6
   7/* On Hyper-V, to reset, we need to read from this offset
   8 * from the PCI config space. This is the mechanism used on
   9 * Hyper-V to support PF/VF communication.
  10 */
  11#define IXGBE_HV_RESET_OFFSET           0x201
  12
  13static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
  14					     u32 *retmsg, u16 size)
  15{
  16	struct ixgbe_mbx_info *mbx = &hw->mbx;
  17	s32 retval = mbx->ops.write_posted(hw, msg, size);
  18
  19	if (retval)
  20		return retval;
  21
  22	return mbx->ops.read_posted(hw, retmsg, size);
  23}
  24
  25/**
  26 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  27 *  @hw: pointer to hardware structure
  28 *
  29 *  Starts the hardware by filling the bus info structure and media type, clears
  30 *  all on chip counters, initializes receive address registers, multicast
  31 *  table, VLAN filter table, calls routine to set up link and flow control
  32 *  settings, and leaves transmit and receive units disabled and uninitialized
  33 **/
  34static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
  35{
  36	/* Clear adapter stopped flag */
  37	hw->adapter_stopped = false;
  38
  39	return 0;
  40}
  41
  42/**
  43 *  ixgbevf_init_hw_vf - virtual function hardware initialization
  44 *  @hw: pointer to hardware structure
  45 *
  46 *  Initialize the hardware by resetting the hardware and then starting
  47 *  the hardware
  48 **/
  49static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  50{
  51	s32 status = hw->mac.ops.start_hw(hw);
  52
  53	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
  54
  55	return status;
  56}
  57
  58/**
  59 *  ixgbevf_reset_hw_vf - Performs hardware reset
  60 *  @hw: pointer to hardware structure
  61 *
  62 *  Resets the hardware by resetting the transmit and receive units, masks and
  63 *  clears all interrupts.
  64 **/
  65static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
  66{
  67	struct ixgbe_mbx_info *mbx = &hw->mbx;
  68	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
  69	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
  70	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
  71	u8 *addr = (u8 *)(&msgbuf[1]);
  72
  73	/* Call adapter stop to disable tx/rx and clear interrupts */
  74	hw->mac.ops.stop_adapter(hw);
  75
  76	/* reset the api version */
  77	hw->api_version = ixgbe_mbox_api_10;
  78
  79	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
  80	IXGBE_WRITE_FLUSH(hw);
  81
  82	/* we cannot reset while the RSTI / RSTD bits are asserted */
  83	while (!mbx->ops.check_for_rst(hw) && timeout) {
  84		timeout--;
  85		udelay(5);
  86	}
  87
  88	if (!timeout)
  89		return IXGBE_ERR_RESET_FAILED;
  90
  91	/* mailbox timeout can now become active */
  92	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
  93
  94	msgbuf[0] = IXGBE_VF_RESET;
  95	mbx->ops.write_posted(hw, msgbuf, 1);
  96
  97	mdelay(10);
  98
  99	/* set our "perm_addr" based on info provided by PF
 100	 * also set up the mc_filter_type which is piggy backed
 101	 * on the mac address in word 3
 102	 */
 103	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
 104	if (ret_val)
 105		return ret_val;
 106
 107	/* New versions of the PF may NACK the reset return message
 108	 * to indicate that no MAC address has yet been assigned for
 109	 * the VF.
 110	 */
 111	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
 112	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
 113		return IXGBE_ERR_INVALID_MAC_ADDR;
 114
 115	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
 116		ether_addr_copy(hw->mac.perm_addr, addr);
 117
 118	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 119
 120	return 0;
 121}
 122
 123/**
 124 * Hyper-V variant; the VF/PF communication is through the PCI
 125 * config space.
 126 * @hw: pointer to private hardware struct
 127 */
 128static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 129{
 130#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
 131	struct ixgbevf_adapter *adapter = hw->back;
 132	int i;
 133
 134	for (i = 0; i < 6; i++)
 135		pci_read_config_byte(adapter->pdev,
 136				     (i + IXGBE_HV_RESET_OFFSET),
 137				     &hw->mac.perm_addr[i]);
 138	return 0;
 139#else
 140	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
 141	return -EOPNOTSUPP;
 142#endif
 143}
 144
/**
 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	/* make sure the Rx queue disables have posted before masking IRQs */
	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts by reading the cause register */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
 195
 196/**
 197 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 198 *  @hw: pointer to hardware structure
 199 *  @mc_addr: the multicast address
 200 *
 201 *  Extracts the 12 bits, from a multicast address, to determine which
 202 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 203 *  incoming Rx multicast addresses, to determine the bit-vector to check in
 204 *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 205 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 206 *  to mc_filter_type.
 207 **/
 208static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 209{
 210	u32 vector = 0;
 211
 212	switch (hw->mac.mc_filter_type) {
 213	case 0:   /* use bits [47:36] of the address */
 214		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 215		break;
 216	case 1:   /* use bits [46:35] of the address */
 217		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 218		break;
 219	case 2:   /* use bits [45:34] of the address */
 220		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 221		break;
 222	case 3:   /* use bits [43:32] of the address */
 223		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 224		break;
 225	default:  /* Invalid mc_filter_type */
 226		break;
 227	}
 228
 229	/* vector can only be 12-bits or boundary will be exceeded */
 230	vector &= 0xFFF;
 231	return vector;
 232}
 233
 234/**
 235 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 236 *  @hw: pointer to the HW structure
 237 *  @mac_addr: pointer to storage for retrieved MAC address
 238 **/
 239static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 240{
 241	ether_addr_copy(mac_addr, hw->mac.perm_addr);
 242
 243	return 0;
 244}
 245
/* ixgbevf_set_uc_addr_vf - program (or clear) a secondary unicast address
 * @hw: pointer to the HW structure
 * @index: 1-based macvlan entry index; 0 tells the PF to clear the list
 * @addr: MAC address to program, or NULL when clearing
 *
 * Sends an IXGBE_VF_SET_MACVLAN request to the PF.  Returns 0 on success,
 * -ENOMEM when the PF NACKs (e.g. its filter table is full), or a mailbox
 * error code.
 */
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	u32 msgbuf[3], msgbuf_chk;
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do it's own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	/* keep a pristine copy of the request word to match the reply against */
	msgbuf_chk = msgbuf[0];

	if (addr)
		ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* a NACK means the PF could not install the filter */
		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
			return -ENOMEM;
	}

	return ret_val;
}
 276
 277static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 278{
 279	return -EOPNOTSUPP;
 280}
 281
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Caller is expected to hold the mailbox lock (hence "_locked").
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		/* X550 and newer do not query RETA through the mailbox */
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* unpack the 2-bit compressed entries, 16 per DWORD */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
 357
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Caller is expected to hold the mailbox lock (hence "_locked").
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if API doesn't support RSS Random Key retrieval
	 * or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		/* X550 and newer do not query the key through the mailbox */
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the message word plus 10 key registers */
	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
 418
/**
 *  ixgbevf_set_rar_vf - set device MAC address
 *  @hw: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @vmdq: Unused in this implementation
 *
 *  Asks the PF to program @addr via an IXGBE_VF_SET_MAC_ADDR mailbox
 *  message.  On a PF NACK the cached permanent address is restored and
 *  IXGBE_ERR_MBX is returned.
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
		return IXGBE_ERR_MBX;
	}

	return ret_val;
}
 450
 451/**
 452 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 453 *  @hw: pointer to hardware structure
 454 *  @index: Receive address register to write
 455 *  @addr: Address to put into receive address register
 456 *  @vmdq: Unused in this implementation
 457 *
 458 * We don't really allow setting the device MAC address. However,
 459 * if the address being set is the permanent MAC address we will
 460 * permit that.
 461 **/
 462static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 463				 u32 vmdq)
 464{
 465	if (ether_addr_equal(addr, hw->mac.perm_addr))
 466		return 0;
 467
 468	return -EOPNOTSUPP;
 469}
 470
/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Updates the Multicast Table Array by sending the hashed vectors of up
 *  to 30 multicast addresses to the PF in a single mailbox message.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word.  We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		/* NOTE(review): skipping link-local entries here leaves the
		 * count announced in msgbuf[0] larger than the number of
		 * vectors actually written - presumably harmless since the
		 * trailing words are still read from msgbuf, but confirm
		 * against the PF-side handler.
		 */
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
			IXGBE_VFMAILBOX_SIZE);

}
 514
 515/**
 516 * Hyper-V variant - just a stub.
 517 * @hw: unused
 518 * @netdev: unused
 519 */
 520static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 521					     struct net_device *netdev)
 522{
 523	return -EOPNOTSUPP;
 524}
 525
/**
 *  ixgbevf_update_xcast_mode - Update Multicast mode
 *  @hw: pointer to the HW structure
 *  @xcast_mode: new multicast mode
 *
 *  Updates the Multicast Mode of VF.  Returns 0 on success, -EOPNOTSUPP
 *  when the negotiated mailbox API cannot express the request, or -EPERM
 *  when the PF refuses it.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	/* a NACK means the PF (e.g. for an untrusted VF) denied the mode */
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
 565
 566/**
 567 * Hyper-V variant - just a stub.
 568 * @hw: unused
 569 * @xcast_mode: unused
 570 */
 571static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 572{
 573	return -EOPNOTSUPP;
 574}
 575
 576/**
 577 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 578 *  @hw: pointer to the HW structure
 579 *  @vlan: 12 bit VLAN ID
 580 *  @vind: unused by VF drivers
 581 *  @vlan_on: if true then set bit, else clear bit
 582 **/
 583static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 584			       bool vlan_on)
 585{
 586	u32 msgbuf[2];
 587	s32 err;
 588
 589	msgbuf[0] = IXGBE_VF_SET_VLAN;
 590	msgbuf[1] = vlan;
 591	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
 592	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 593
 594	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 595					 ARRAY_SIZE(msgbuf));
 596	if (err)
 597		goto mbx_err;
 598
 599	/* remove extra bits from the message */
 600	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 601	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
 602
 603	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
 604		err = IXGBE_ERR_INVALID_ARGUMENT;
 605
 606mbx_err:
 607	return err;
 608}
 609
 610/**
 611 * Hyper-V variant - just a stub.
 612 * @hw: unused
 613 * @vlan: unused
 614 * @vind: unused
 615 * @vlan_on: unused
 616 */
 617static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 618				  bool vlan_on)
 619{
 620	return -EOPNOTSUPP;
 621}
 622
 623/**
 624 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 625 *  @hw: pointer to hardware structure
 626 *  @speed: Unused in this implementation
 627 *  @autoneg: Unused in this implementation
 628 *  @autoneg_wait_to_complete: Unused in this implementation
 629 *
 630 *  Do nothing and return success.  VF drivers are not allowed to change
 631 *  global settings.  Maintained for driver compatibility.
 632 **/
 633static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
 634				     ixgbe_link_speed speed, bool autoneg,
 635				     bool autoneg_wait_to_complete)
 636{
 637	return 0;
 638}
 639
/**
 *  ixgbevf_check_mac_link_vf - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true is link is up, false otherwise
 *  @autoneg_wait_to_complete: unused
 *
 *  Reads the links register to determine if link is up and the current speed,
 *  then verifies via the mailbox that the PF is still responsive before
 *  declaring the link up.
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		/* require LINKS_UP to hold across 5 reads, 100us apart */
		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
 727
/**
 * ixgbevf_hv_check_mac_link_vf - Get link/speed status, Hyper-V variant
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Same as ixgbevf_check_mac_link_vf but without the PF liveness check,
 * since there is no mailbox communication on Hyper-V.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		/* require LINKS_UP to hold across 5 reads, 100us apart */
		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
 792
/**
 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 *  @hw: pointer to the HW structure
 *  @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (ret_val)
		return ret_val;
	/* NOTE(review): unlike the other mailbox helpers this tests the
	 * IXGBE_VF_SET_LPE and NACK bits individually rather than comparing
	 * against (IXGBE_VF_SET_LPE | IXGBE_VT_MSGTYPE_NACK) - presumably
	 * equivalent for this message encoding, but confirm.
	 */
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_MBX;

	return 0;
}
 816
/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently: program queue 0's RXDCTL directly instead of
	 * asking the PF via the (nonexistent) mailbox.
	 */
	reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* CRC == 4 */
	/* NOTE(review): the new length is OR-ed over whatever RLPML value the
	 * register already holds instead of masking the field out first -
	 * appears to assume it was previously zero; verify against the
	 * RXDCTL register layout.
	 */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}
 837
 838/**
 839 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 840 *  @hw: pointer to the HW structure
 841 *  @api: integer containing requested API version
 842 **/
 843static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 844{
 845	int err;
 846	u32 msg[3];
 847
 848	/* Negotiate the mailbox API version */
 849	msg[0] = IXGBE_VF_API_NEGOTIATE;
 850	msg[1] = api;
 851	msg[2] = 0;
 852
 853	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 
 854	if (!err) {
 855		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 856
 857		/* Store value and return 0 on success */
 858		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
 859			hw->api_version = api;
 860			return 0;
 861		}
 862
 863		err = IXGBE_ERR_INVALID_ARGUMENT;
 864	}
 865
 866	return err;
 867}
 868
 869/**
 870 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 871 *  @hw: pointer to the HW structure
 872 *  @api: integer containing requested API version
 873 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 874 **/
 875static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 876{
 877	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
 878	if (api != ixgbe_mbox_api_10)
 879		return IXGBE_ERR_INVALID_ARGUMENT;
 880
 881	return 0;
 882}
 883
/* ixgbevf_get_queues - fetch queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes (1 if unknown)
 * @default_tc: filled with the default queue/TC (0 if out of bounds)
 *
 * Populates hw->mac.max_tx_queues/max_rx_queues from the PF's reply,
 * clamping every value to a sane range.  Silently returns 0 when the
 * negotiated mailbox API predates IXGBE_VF_GET_QUEUE.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));

	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
 940
/* Standard MAC operations: all PF communication goes over the mailbox */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
 957
/* Hyper-V MAC operations: no PF mailbox exists, so mailbox-dependent ops
 * are replaced by config-space based or stubbed (-EOPNOTSUPP) variants.
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
 974
/* Per-device info blocks: one per supported VF MAC type, each in a regular
 * (mailbox-based) flavour and a Hyper-V flavour that avoids the PF mailbox.
 */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};