v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#include "vf.h"
   5#include "ixgbevf.h"
   6
   7/* On Hyper-V, to reset, we need to read from this offset
   8 * from the PCI config space. This is the mechanism used on
   9 * Hyper-V to support PF/VF communication.
  10 */
  11#define IXGBE_HV_RESET_OFFSET           0x201
  12
  13static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
  14					     u32 *retmsg, u16 size)
  15{
   16	s32 retval = ixgbevf_write_mbx(hw, msg, size);
  17
  18	if (retval)
  19		return retval;
  20
  21	return ixgbevf_poll_mbx(hw, retmsg, size);
  22}
  23
  24/**
  25 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  26 *  @hw: pointer to hardware structure
  27 *
  28 *  Starts the hardware by filling the bus info structure and media type, clears
  29 *  all on chip counters, initializes receive address registers, multicast
  30 *  table, VLAN filter table, calls routine to set up link and flow control
  31 *  settings, and leaves transmit and receive units disabled and uninitialized
  32 **/
  33static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
  34{
  35	/* Clear adapter stopped flag */
  36	hw->adapter_stopped = false;
  37
  38	return 0;
  39}
  40
  41/**
  42 *  ixgbevf_init_hw_vf - virtual function hardware initialization
  43 *  @hw: pointer to hardware structure
  44 *
  45 *  Initialize the hardware by resetting the hardware and then starting
  46 *  the hardware
  47 **/
  48static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  49{
  50	s32 status = hw->mac.ops.start_hw(hw);
  51
  52	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
  53
  54	return status;
  55}
  56
  57/**
  58 *  ixgbevf_reset_hw_vf - Performs hardware reset
  59 *  @hw: pointer to hardware structure
  60 *
  61 *  Resets the hardware by resetting the transmit and receive units, masks and
  62 *  clears all interrupts.
  63 **/
  64static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
  65{
  66	struct ixgbe_mbx_info *mbx = &hw->mbx;
   67	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
  68	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
  69	u8 *addr = (u8 *)(&msgbuf[1]);
  70	s32 ret_val;
  71
  72	/* Call adapter stop to disable tx/rx and clear interrupts */
  73	hw->mac.ops.stop_adapter(hw);
  74
  75	/* reset the api version */
  76	hw->api_version = ixgbe_mbox_api_10;
  77	hw->mbx.ops.init_params(hw);
  78	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
  79	       sizeof(struct ixgbe_mbx_operations));
  80
  81	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
  82	IXGBE_WRITE_FLUSH(hw);
  83
  84	/* we cannot reset while the RSTI / RSTD bits are asserted */
  85	while (!mbx->ops.check_for_rst(hw) && timeout) {
  86		timeout--;
  87		udelay(5);
  88	}
  89
  90	if (!timeout)
  91		return IXGBE_ERR_RESET_FAILED;
  92
  93	/* mailbox timeout can now become active */
  94	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
  95
  96	msgbuf[0] = IXGBE_VF_RESET;
  97	ixgbevf_write_mbx(hw, msgbuf, 1);
  98
  99	mdelay(10);
 100
 101	/* set our "perm_addr" based on info provided by PF
 102	 * also set up the mc_filter_type which is piggy backed
 103	 * on the mac address in word 3
 104	 */
 105	ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
 106	if (ret_val)
 107		return ret_val;
 108
 109	/* New versions of the PF may NACK the reset return message
 110	 * to indicate that no MAC address has yet been assigned for
 111	 * the VF.
 112	 */
 113	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
 114	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
 115		return IXGBE_ERR_INVALID_MAC_ADDR;
 116
 117	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
 118		ether_addr_copy(hw->mac.perm_addr, addr);
 119
 120	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 121
 122	return 0;
 123}
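/* Reply layout sketch (illustrative note, not part of the upstream file,
 * derived from the code above): the PF answers IXGBE_VF_RESET with
 * IXGBE_VF_PERMADDR_MSG_LEN DWORDs - the message type in word 0, the 6-byte
 * permanent MAC spread over words 1-2, and the multicast filter type in
 * word IXGBE_VF_MC_TYPE_WORD (word 3).
 */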
 124
 125/**
 126 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 127 * @hw: pointer to private hardware struct
 128 *
 129 * Hyper-V variant; the VF/PF communication is through the PCI
 130 * config space.
 131 */
 132static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 133{
 134#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
 135	struct ixgbevf_adapter *adapter = hw->back;
 136	int i;
 137
 138	for (i = 0; i < 6; i++)
 139		pci_read_config_byte(adapter->pdev,
 140				     (i + IXGBE_HV_RESET_OFFSET),
 141				     &hw->mac.perm_addr[i]);
 142	return 0;
 143#else
 144	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
 145	return -EOPNOTSUPP;
 146#endif
 147}
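/* Note (illustrative, not part of the upstream file): on Hyper-V the "reset"
 * simply re-reads the permanent MAC one byte at a time from PCI config space
 * at offsets 0x201..0x206 (IXGBE_HV_RESET_OFFSET + 0..5); no mailbox traffic
 * is involved.
 */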
 148
 149/**
 150 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 151 *  @hw: pointer to hardware structure
 152 *
 153 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 154 *  disables transmit and receive units. The adapter_stopped flag is used by
 155 *  the shared code and drivers to determine if the adapter is in a stopped
 156 *  state and should not touch the hardware.
 157 **/
 158static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
 159{
 160	u32 number_of_queues;
 161	u32 reg_val;
 162	u16 i;
 163
 164	/* Set the adapter_stopped flag so other driver functions stop touching
 165	 * the hardware
 166	 */
 167	hw->adapter_stopped = true;
 168
  169	/* Disable the receive unit by stopping each queue */
 170	number_of_queues = hw->mac.max_rx_queues;
 171	for (i = 0; i < number_of_queues; i++) {
 172		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 173		if (reg_val & IXGBE_RXDCTL_ENABLE) {
 174			reg_val &= ~IXGBE_RXDCTL_ENABLE;
 175			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
 176		}
 177	}
 178
 179	IXGBE_WRITE_FLUSH(hw);
 180
  181	/* Clear interrupt mask to stop interrupts from being generated */
 182	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
 183
 184	/* Clear any pending interrupts */
 185	IXGBE_READ_REG(hw, IXGBE_VTEICR);
 186
 187	/* Disable the transmit unit.  Each queue must be disabled. */
 188	number_of_queues = hw->mac.max_tx_queues;
 189	for (i = 0; i < number_of_queues; i++) {
 190		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 191		if (reg_val & IXGBE_TXDCTL_ENABLE) {
 192			reg_val &= ~IXGBE_TXDCTL_ENABLE;
 193			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
 194		}
 195	}
 196
 197	return 0;
 198}
 199
 200/**
 201 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 202 *  @hw: pointer to hardware structure
 203 *  @mc_addr: the multicast address
 204 *
 205 *  Extracts the 12 bits, from a multicast address, to determine which
 206 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 207 *  incoming Rx multicast addresses, to determine the bit-vector to check in
  208 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 209 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 210 *  to mc_filter_type.
 211 **/
 212static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 213{
 214	u32 vector = 0;
 215
 216	switch (hw->mac.mc_filter_type) {
 217	case 0:   /* use bits [47:36] of the address */
 218		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 219		break;
 220	case 1:   /* use bits [46:35] of the address */
 221		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 222		break;
 223	case 2:   /* use bits [45:34] of the address */
 224		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 225		break;
 226	case 3:   /* use bits [43:32] of the address */
 227		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 228		break;
 229	default:  /* Invalid mc_filter_type */
 230		break;
 231	}
 232
 233	/* vector can only be 12-bits or boundary will be exceeded */
 234	vector &= 0xFFF;
 235	return vector;
 236}
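/* Worked example (illustrative note, not part of the upstream file): with
 * mc_filter_type == 0 the hash uses address bits [47:36].  Assuming the
 * multicast address 01:00:5e:00:00:fb (so mc_addr[4] = 0x00 and
 * mc_addr[5] = 0xfb):
 *
 *	vector = (0x00 >> 4) | ((u16)0xfb << 4) = 0xfb0
 *
 * i.e. bit 0xfb0 of the 4096 multicast table bits is the one the hardware
 * checks for this address.
 */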
 237
 238/**
 239 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 240 *  @hw: pointer to the HW structure
 241 *  @mac_addr: pointer to storage for retrieved MAC address
 242 **/
 243static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 244{
 245	ether_addr_copy(mac_addr, hw->mac.perm_addr);
 246
 247	return 0;
 248}
 249
 250static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 251{
 252	u32 msgbuf[3], msgbuf_chk;
 253	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 254	s32 ret_val;
 255
 256	memset(msgbuf, 0, sizeof(msgbuf));
 257	/* If index is one then this is the start of a new list and needs
  258	 * indication to the PF so it can do its own list management.
 259	 * If it is zero then that tells the PF to just clear all of
 260	 * this VF's macvlans and there is no new list.
 261	 */
 262	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
 263	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
 264	msgbuf_chk = msgbuf[0];
 265
 266	if (addr)
 267		ether_addr_copy(msg_addr, addr);
 268
 269	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 270					     ARRAY_SIZE(msgbuf));
 271	if (!ret_val) {
 272		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 273
 274		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
 275			return -ENOMEM;
 276	}
 277
 278	return ret_val;
 279}
 280
 281static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 282{
 283	return -EOPNOTSUPP;
 284}
 285
 286/**
 287 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 288 * @hw: pointer to hardware structure
 289 * @reta: buffer to fill with RETA contents.
 290 * @num_rx_queues: Number of Rx queues configured for this port
 291 *
 292 * The "reta" buffer should be big enough to contain 32 registers.
 293 *
 294 * Returns: 0 on success.
 295 *          if API doesn't support this operation - (-EOPNOTSUPP).
 296 */
 297int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
 298{
 299	int err, i, j;
 300	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 301	u32 *hw_reta = &msgbuf[1];
 302	u32 mask = 0;
 303
 304	/* We have to use a mailbox for 82599 and x540 devices only.
 305	 * For these devices RETA has 128 entries.
 306	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
 307	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
 308	 */
 309	int dwords = IXGBEVF_82599_RETA_SIZE / 16;
 310
 311	/* We support the RSS querying for 82599 and x540 devices only.
 312	 * Thus return an error if API doesn't support RETA querying or querying
 313	 * is not supported for this device type.
 314	 */
 315	switch (hw->api_version) {
 316	case ixgbe_mbox_api_15:
 317	case ixgbe_mbox_api_14:
 318	case ixgbe_mbox_api_13:
 319	case ixgbe_mbox_api_12:
 320		if (hw->mac.type < ixgbe_mac_X550_vf)
 321			break;
 322		fallthrough;
 323	default:
 324		return -EOPNOTSUPP;
 325	}
 326
 327	msgbuf[0] = IXGBE_VF_GET_RETA;
 328
 329	err = ixgbevf_write_mbx(hw, msgbuf, 1);
 330
 331	if (err)
 332		return err;
 333
 334	err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
 335
 336	if (err)
 337		return err;
 338
 339	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 340
 341	/* If the operation has been refused by a PF return -EPERM */
 342	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
 343		return -EPERM;
 344
 345	/* If we didn't get an ACK there must have been
 346	 * some sort of mailbox error so we should treat it
 347	 * as such.
 348	 */
 349	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
 350		return IXGBE_ERR_MBX;
 351
 352	/* ixgbevf doesn't support more than 2 queues at the moment */
 353	if (num_rx_queues > 1)
 354		mask = 0x1;
 355
 356	for (i = 0; i < dwords; i++)
 357		for (j = 0; j < 16; j++)
 358			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
 359
 360	return 0;
 361}
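/* Worked example (illustrative note, not part of the upstream file): each
 * mailbox DWORD packs 16 RETA entries at 2 bits apiece, so the 128-entry
 * table arrives in 8 DWORDs.  Assuming the PF returned
 * hw_reta[0] = 0x00000044 and num_rx_queues > 1 (mask = 0x1), the unpacking
 * loop above yields reta[1] = 1 and reta[3] = 1, with the other entries of
 * that DWORD set to 0.
 */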
 362
 363/**
 364 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 365 * @hw: pointer to the HW structure
 366 * @rss_key: buffer to fill with RSS Hash Key contents.
 367 *
 368 * The "rss_key" buffer should be big enough to contain 10 registers.
 369 *
 370 * Returns: 0 on success.
 371 *          if API doesn't support this operation - (-EOPNOTSUPP).
 372 */
 373int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 374{
 375	int err;
 376	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 377
 378	/* We currently support the RSS Random Key retrieval for 82599 and x540
 379	 * devices only.
 380	 *
 381	 * Thus return an error if API doesn't support RSS Random Key retrieval
 382	 * or if the operation is not supported for this device type.
 383	 */
 384	switch (hw->api_version) {
 385	case ixgbe_mbox_api_15:
 386	case ixgbe_mbox_api_14:
 387	case ixgbe_mbox_api_13:
 388	case ixgbe_mbox_api_12:
 389		if (hw->mac.type < ixgbe_mac_X550_vf)
 390			break;
 391		fallthrough;
 392	default:
 393		return -EOPNOTSUPP;
 394	}
 395
 396	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
 397	err = ixgbevf_write_mbx(hw, msgbuf, 1);
 398
 399	if (err)
 400		return err;
 401
 402	err = ixgbevf_poll_mbx(hw, msgbuf, 11);
 403
 404	if (err)
 405		return err;
 406
 407	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 408
 409	/* If the operation has been refused by a PF return -EPERM */
 410	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
 411		return -EPERM;
 412
 413	/* If we didn't get an ACK there must have been
 414	 * some sort of mailbox error so we should treat it
 415	 * as such.
 416	 */
 417	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
 418		return IXGBE_ERR_MBX;
 419
 420	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
 421
 422	return 0;
 423}
 424
 425/**
 426 *  ixgbevf_set_rar_vf - set device MAC address
 427 *  @hw: pointer to hardware structure
 428 *  @index: Receive address register to write
 429 *  @addr: Address to put into receive address register
 430 *  @vmdq: Unused in this implementation
 431 **/
 432static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 433			      u32 vmdq)
 434{
 435	u32 msgbuf[3];
 436	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 437	s32 ret_val;
 438
 439	memset(msgbuf, 0, sizeof(msgbuf));
 440	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
 441	ether_addr_copy(msg_addr, addr);
 442
 443	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 444					     ARRAY_SIZE(msgbuf));
 445	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 446
 447	/* if nacked the address was rejected, use "perm_addr" */
 448	if (!ret_val &&
 449	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
 450		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
 451		return IXGBE_ERR_MBX;
 452	}
 453
 454	return ret_val;
 455}
 456
 457/**
 458 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 459 *  @hw: pointer to hardware structure
 460 *  @index: Receive address register to write
 461 *  @addr: Address to put into receive address register
 462 *  @vmdq: Unused in this implementation
 463 *
 464 * We don't really allow setting the device MAC address. However,
 465 * if the address being set is the permanent MAC address we will
 466 * permit that.
 467 **/
 468static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 469				 u32 vmdq)
 470{
 471	if (ether_addr_equal(addr, hw->mac.perm_addr))
 472		return 0;
 473
 474	return -EOPNOTSUPP;
 475}
 476
 477/**
 478 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 479 *  @hw: pointer to the HW structure
 480 *  @netdev: pointer to net device structure
 481 *
 482 *  Updates the Multicast Table Array.
 483 **/
 484static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 485					  struct net_device *netdev)
 486{
 487	struct netdev_hw_addr *ha;
 488	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 489	u16 *vector_list = (u16 *)&msgbuf[1];
 490	u32 cnt, i;
 491
 492	/* Each entry in the list uses 1 16 bit word.  We have 30
 493	 * 16 bit words available in our HW msg buffer (minus 1 for the
 494	 * msg type).  That's 30 hash values if we pack 'em right.  If
 495	 * there are more than 30 MC addresses to add then punt the
 496	 * extras for now and then add code to handle more than 30 later.
  497	 * It would be unusual for a server to request that many multicast
  498	 * addresses except in large enterprise network environments.
 499	 */
 500
 501	cnt = netdev_mc_count(netdev);
 502	if (cnt > 30)
 503		cnt = 30;
 504	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
 505	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
 506
 507	i = 0;
 508	netdev_for_each_mc_addr(ha, netdev) {
 509		if (i == cnt)
 510			break;
 511		if (is_link_local_ether_addr(ha->addr))
 512			continue;
 513
 514		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
 515	}
 516
 517	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 518			IXGBE_VFMAILBOX_SIZE);
 519}
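/* Message layout sketch (illustrative note, not part of the upstream file):
 * for three multicast addresses the request would look roughly like
 *
 *	msgbuf[0] = IXGBE_VF_SET_MULTICAST | (3 << IXGBE_VT_MSGINFO_SHIFT);
 *	vector_list[0..2] = 12-bit MTA vectors, packed two per DWORD starting
 *			    at msgbuf[1];
 *
 * which is why at most 30 vectors fit behind the one-DWORD message type in
 * the 16-DWORD mailbox.
 */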
 520
 521/**
 522 * ixgbevf_hv_update_mc_addr_list_vf - stub
 523 * @hw: unused
 524 * @netdev: unused
 525 *
 526 * Hyper-V variant - just a stub.
 527 */
 528static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 529					     struct net_device *netdev)
 530{
 531	return -EOPNOTSUPP;
 532}
 533
 534/**
 535 *  ixgbevf_update_xcast_mode - Update Multicast mode
 536 *  @hw: pointer to the HW structure
 537 *  @xcast_mode: new multicast mode
 538 *
 539 *  Updates the Multicast Mode of VF.
 540 **/
 541static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 542{
 543	u32 msgbuf[2];
 544	s32 err;
 545
 546	switch (hw->api_version) {
 547	case ixgbe_mbox_api_12:
 548		/* promisc introduced in 1.3 version */
 549		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
 550			return -EOPNOTSUPP;
 551		fallthrough;
 552	case ixgbe_mbox_api_13:
 553	case ixgbe_mbox_api_14:
 554	case ixgbe_mbox_api_15:
 555		break;
 556	default:
 557		return -EOPNOTSUPP;
 558	}
 559
 560	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
 561	msgbuf[1] = xcast_mode;
 562
 563	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 564					 ARRAY_SIZE(msgbuf));
 565	if (err)
 566		return err;
 567
 568	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 569	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
 570		return -EPERM;
 571
 572	return 0;
 573}
 574
 575/**
 576 * ixgbevf_hv_update_xcast_mode - stub
 577 * @hw: unused
 578 * @xcast_mode: unused
 579 *
 580 * Hyper-V variant - just a stub.
 581 */
 582static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 583{
 584	return -EOPNOTSUPP;
 585}
 586
 587/**
 588 * ixgbevf_get_link_state_vf - Get VF link state from PF
 589 * @hw: pointer to the HW structure
 590 * @link_state: link state storage
 591 *
 592 * Returns state of the operation error or success.
 593 */
 594static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
 595{
 596	u32 msgbuf[2];
 597	s32 ret_val;
 598	s32 err;
 599
 600	msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
 601	msgbuf[1] = 0x0;
 602
 603	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
 604
 605	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
 606		ret_val = IXGBE_ERR_MBX;
 607	} else {
 608		ret_val = 0;
 609		*link_state = msgbuf[1];
 610	}
 611
 612	return ret_val;
 613}
 614
 615/**
  616 * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub.
 617 * @hw: unused
 618 * @link_state: unused
 619 *
 620 * Hyper-V variant; there is no mailbox communication.
 621 */
 622static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
 623{
 624	return -EOPNOTSUPP;
 625}
 626
 627/**
 628 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 629 *  @hw: pointer to the HW structure
 630 *  @vlan: 12 bit VLAN ID
 631 *  @vind: unused by VF drivers
 632 *  @vlan_on: if true then set bit, else clear bit
 633 **/
 634static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 635			       bool vlan_on)
 636{
 637	u32 msgbuf[2];
 638	s32 err;
 639
 640	msgbuf[0] = IXGBE_VF_SET_VLAN;
 641	msgbuf[1] = vlan;
 642	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
 643	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 644
 645	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 646					 ARRAY_SIZE(msgbuf));
 647	if (err)
 648		goto mbx_err;
 649
 650	/* remove extra bits from the message */
 651	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 652	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
 653
 654	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
 655		err = IXGBE_ERR_INVALID_ARGUMENT;
 656
 657mbx_err:
 658	return err;
 659}
 660
 661/**
  662 * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
 663 * @hw: unused
 664 * @vlan: unused
 665 * @vind: unused
 666 * @vlan_on: unused
 667 */
 668static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 669				  bool vlan_on)
 670{
 671	return -EOPNOTSUPP;
 672}
 673
 674/**
 675 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 676 *  @hw: pointer to hardware structure
 677 *  @speed: Unused in this implementation
 678 *  @autoneg: Unused in this implementation
 679 *  @autoneg_wait_to_complete: Unused in this implementation
 680 *
 681 *  Do nothing and return success.  VF drivers are not allowed to change
 682 *  global settings.  Maintained for driver compatibility.
 683 **/
 684static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
 685				     ixgbe_link_speed speed, bool autoneg,
 686				     bool autoneg_wait_to_complete)
 687{
 688	return 0;
 689}
 690
 691/**
 692 *  ixgbevf_check_mac_link_vf - Get link/speed status
 693 *  @hw: pointer to hardware structure
 694 *  @speed: pointer to link speed
  695 *  @link_up: true if link is up, false otherwise
 696 *  @autoneg_wait_to_complete: unused
 697 *
 698 *  Reads the links register to determine if link is up and the current speed
 699 **/
 700static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 701				     ixgbe_link_speed *speed,
 702				     bool *link_up,
 703				     bool autoneg_wait_to_complete)
 704{
 705	struct ixgbe_mbx_info *mbx = &hw->mbx;
 706	struct ixgbe_mac_info *mac = &hw->mac;
 707	s32 ret_val = 0;
 708	u32 links_reg;
 709	u32 in_msg = 0;
 710
 711	/* If we were hit with a reset drop the link */
 712	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 713		mac->get_link_status = true;
 714
 715	if (!mac->get_link_status)
 716		goto out;
 717
 718	/* if link status is down no point in checking to see if pf is up */
 719	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 720	if (!(links_reg & IXGBE_LINKS_UP))
 721		goto out;
 722
 723	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 724	 * before the link status is correct
 725	 */
 726	if (mac->type == ixgbe_mac_82599_vf) {
 727		int i;
 728
 729		for (i = 0; i < 5; i++) {
 730			udelay(100);
 731			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 732
 733			if (!(links_reg & IXGBE_LINKS_UP))
 734				goto out;
 735		}
 736	}
 737
 738	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 739	case IXGBE_LINKS_SPEED_10G_82599:
 740		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 741		break;
 742	case IXGBE_LINKS_SPEED_1G_82599:
 743		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 744		break;
 745	case IXGBE_LINKS_SPEED_100_82599:
 746		*speed = IXGBE_LINK_SPEED_100_FULL;
 747		break;
 748	}
 749
 750	/* if the read failed it could just be a mailbox collision, best wait
 751	 * until we are called again and don't report an error
 752	 */
 753	if (mbx->ops.read(hw, &in_msg, 1)) {
 754		if (hw->api_version >= ixgbe_mbox_api_15)
 755			mac->get_link_status = false;
 756		goto out;
 757	}
 758
 759	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
  760		/* msg is not CTS; if it is a NACK we must have lost CTS status */
 761		if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
 762			ret_val = -1;
 763		goto out;
 764	}
 765
 766	/* the pf is talking, if we timed out in the past we reinit */
 767	if (!mbx->timeout) {
 768		ret_val = -1;
 769		goto out;
 770	}
 771
 772	/* if we passed all the tests above then the link is up and we no
 773	 * longer need to check for link
 774	 */
 775	mac->get_link_status = false;
 776
 777out:
 778	*link_up = !mac->get_link_status;
 779	return ret_val;
 780}
 781
 782/**
 783 * ixgbevf_hv_check_mac_link_vf - check link
 784 * @hw: pointer to private hardware struct
 785 * @speed: pointer to link speed
  786 * @link_up: true if link is up, false otherwise
 787 * @autoneg_wait_to_complete: unused
 788 *
 789 * Hyper-V variant; there is no mailbox communication.
 790 */
 791static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
 792					ixgbe_link_speed *speed,
 793					bool *link_up,
 794					bool autoneg_wait_to_complete)
 795{
 796	struct ixgbe_mbx_info *mbx = &hw->mbx;
 797	struct ixgbe_mac_info *mac = &hw->mac;
 798	u32 links_reg;
 799
 800	/* If we were hit with a reset drop the link */
 801	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 802		mac->get_link_status = true;
 803
 804	if (!mac->get_link_status)
 805		goto out;
 806
 807	/* if link status is down no point in checking to see if pf is up */
 808	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 809	if (!(links_reg & IXGBE_LINKS_UP))
 810		goto out;
 811
 812	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 813	 * before the link status is correct
 814	 */
 815	if (mac->type == ixgbe_mac_82599_vf) {
 816		int i;
 817
 818		for (i = 0; i < 5; i++) {
 819			udelay(100);
 820			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 821
 822			if (!(links_reg & IXGBE_LINKS_UP))
 823				goto out;
 824		}
 825	}
 826
 827	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 828	case IXGBE_LINKS_SPEED_10G_82599:
 829		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 830		break;
 831	case IXGBE_LINKS_SPEED_1G_82599:
 832		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 833		break;
 834	case IXGBE_LINKS_SPEED_100_82599:
 835		*speed = IXGBE_LINK_SPEED_100_FULL;
 836		break;
 837	}
 838
 839	/* if we passed all the tests above then the link is up and we no
 840	 * longer need to check for link
 841	 */
 842	mac->get_link_status = false;
 843
 844out:
 845	*link_up = !mac->get_link_status;
 846	return 0;
 847}
 848
 849/**
 850 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 851 *  @hw: pointer to the HW structure
 852 *  @max_size: value to assign to max frame size
 853 **/
 854static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 855{
 856	u32 msgbuf[2];
 857	s32 ret_val;
 858
 859	msgbuf[0] = IXGBE_VF_SET_LPE;
 860	msgbuf[1] = max_size;
 861
 862	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 863					     ARRAY_SIZE(msgbuf));
 864	if (ret_val)
 865		return ret_val;
 866	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
 867	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
 868		return IXGBE_ERR_MBX;
 869
 870	return 0;
 871}
 872
 873/**
 874 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 875 * @hw: pointer to the HW structure
 876 * @max_size: value to assign to max frame size
 877 * Hyper-V variant.
 878 **/
 879static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 880{
 881	u32 reg;
 882
 883	/* If we are on Hyper-V, we implement this functionality
 884	 * differently.
 885	 */
 886	reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
 887	/* CRC == 4 */
 888	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
 889	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
 890
 891	return 0;
 892}
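/* Illustrative arithmetic (not part of the upstream file): the "+ 4" covers
 * the CRC/FCS, so a hypothetical max_size of 1522 would OR 1526 together
 * with IXGBE_RXDCTL_RLPML_EN into VFRXDCTL(0); only Rx queue 0 is touched in
 * this Hyper-V path.
 */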
 893
 894/**
 895 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 896 *  @hw: pointer to the HW structure
 897 *  @api: integer containing requested API version
 898 **/
 899static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 900{
 901	int err;
 902	u32 msg[3];
 903
 904	/* Negotiate the mailbox API version */
 905	msg[0] = IXGBE_VF_API_NEGOTIATE;
 906	msg[1] = api;
 907	msg[2] = 0;
 908
 909	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 910	if (!err) {
 911		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 912
 913		/* Store value and return 0 on success */
 914		if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
 915			      IXGBE_VT_MSGTYPE_SUCCESS)) {
 916			hw->api_version = api;
 917			return 0;
 918		}
 919
 920		err = IXGBE_ERR_INVALID_ARGUMENT;
 921	}
 922
 923	return err;
 924}
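/* Caller sketch (illustrative only, not taken verbatim from the driver):
 * negotiation is normally attempted from the newest mailbox API downward
 * until the PF accepts one, e.g.:
 *
 *	static const int api[] = { ixgbe_mbox_api_15, ixgbe_mbox_api_14,
 *				   ixgbe_mbox_api_13, ixgbe_mbox_api_12,
 *				   ixgbe_mbox_api_11, ixgbe_mbox_api_10 };
 *	int i, err = -EOPNOTSUPP;
 *
 *	for (i = 0; i < ARRAY_SIZE(api) && err; i++)
 *		err = hw->mac.ops.negotiate_api_version(hw, api[i]);
 */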
 925
 926/**
 927 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 928 *  @hw: pointer to the HW structure
 929 *  @api: integer containing requested API version
 930 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 931 **/
 932static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 933{
 934	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
 935	if (api != ixgbe_mbox_api_10)
 936		return IXGBE_ERR_INVALID_ARGUMENT;
 937
 938	return 0;
 939}
 940
 941int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 942		       unsigned int *default_tc)
 943{
 944	int err;
 945	u32 msg[5];
 946
 947	/* do nothing if API doesn't support ixgbevf_get_queues */
 948	switch (hw->api_version) {
 949	case ixgbe_mbox_api_11:
 950	case ixgbe_mbox_api_12:
 951	case ixgbe_mbox_api_13:
 952	case ixgbe_mbox_api_14:
 953	case ixgbe_mbox_api_15:
 954		break;
 955	default:
 956		return 0;
 957	}
 958
 959	/* Fetch queue configuration from the PF */
 960	msg[0] = IXGBE_VF_GET_QUEUE;
 961	msg[1] = msg[2] = msg[3] = msg[4] = 0;
 962
 963	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 964	if (!err) {
 965		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 966
 967		/* if we didn't get an ACK there must have been
 968		 * some sort of mailbox error so we should treat it
 969		 * as such
 970		 */
 971		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
 972			return IXGBE_ERR_MBX;
 973
 974		/* record and validate values from message */
 975		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
 976		if (hw->mac.max_tx_queues == 0 ||
 977		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
 978			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
 979
 980		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
 981		if (hw->mac.max_rx_queues == 0 ||
 982		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
 983			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
 984
 985		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
 986		/* in case of unknown state assume we cannot tag frames */
 987		if (*num_tcs > hw->mac.max_rx_queues)
 988			*num_tcs = 1;
 989
 990		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
 991		/* default to queue 0 on out-of-bounds queue number */
 992		if (*default_tc >= hw->mac.max_tx_queues)
 993			*default_tc = 0;
 994	}
 995
 996	return err;
 997}
 998
 999static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
1000	.init_hw		= ixgbevf_init_hw_vf,
1001	.reset_hw		= ixgbevf_reset_hw_vf,
1002	.start_hw		= ixgbevf_start_hw_vf,
1003	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
1004	.stop_adapter		= ixgbevf_stop_hw_vf,
1005	.setup_link		= ixgbevf_setup_mac_link_vf,
1006	.check_link		= ixgbevf_check_mac_link_vf,
1007	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
1008	.set_rar		= ixgbevf_set_rar_vf,
1009	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
1010	.update_xcast_mode	= ixgbevf_update_xcast_mode,
1011	.get_link_state		= ixgbevf_get_link_state_vf,
1012	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
1013	.set_vfta		= ixgbevf_set_vfta_vf,
1014	.set_rlpml		= ixgbevf_set_rlpml_vf,
1015};
1016
1017static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
1018	.init_hw		= ixgbevf_init_hw_vf,
1019	.reset_hw		= ixgbevf_hv_reset_hw_vf,
1020	.start_hw		= ixgbevf_start_hw_vf,
1021	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
1022	.stop_adapter		= ixgbevf_stop_hw_vf,
1023	.setup_link		= ixgbevf_setup_mac_link_vf,
1024	.check_link		= ixgbevf_hv_check_mac_link_vf,
1025	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
1026	.set_rar		= ixgbevf_hv_set_rar_vf,
1027	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
1028	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
1029	.get_link_state		= ixgbevf_hv_get_link_state_vf,
1030	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
1031	.set_vfta		= ixgbevf_hv_set_vfta_vf,
1032	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
1033};
1034
1035const struct ixgbevf_info ixgbevf_82599_vf_info = {
1036	.mac = ixgbe_mac_82599_vf,
1037	.mac_ops = &ixgbevf_mac_ops,
1038};
1039
1040const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
1041	.mac = ixgbe_mac_82599_vf,
1042	.mac_ops = &ixgbevf_hv_mac_ops,
1043};
1044
1045const struct ixgbevf_info ixgbevf_X540_vf_info = {
1046	.mac = ixgbe_mac_X540_vf,
1047	.mac_ops = &ixgbevf_mac_ops,
1048};
1049
1050const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
1051	.mac = ixgbe_mac_X540_vf,
1052	.mac_ops = &ixgbevf_hv_mac_ops,
1053};
1054
1055const struct ixgbevf_info ixgbevf_X550_vf_info = {
1056	.mac = ixgbe_mac_X550_vf,
1057	.mac_ops = &ixgbevf_mac_ops,
1058};
1059
1060const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
1061	.mac = ixgbe_mac_X550_vf,
1062	.mac_ops = &ixgbevf_hv_mac_ops,
1063};
1064
1065const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1066	.mac = ixgbe_mac_X550EM_x_vf,
1067	.mac_ops = &ixgbevf_mac_ops,
1068};
1069
1070const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1071	.mac = ixgbe_mac_X550EM_x_vf,
1072	.mac_ops = &ixgbevf_hv_mac_ops,
1073};
1074
1075const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
1076	.mac = ixgbe_mac_x550em_a_vf,
1077	.mac_ops = &ixgbevf_mac_ops,
1078};
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#include "vf.h"
   5#include "ixgbevf.h"
   6
   7/* On Hyper-V, to reset, we need to read from this offset
   8 * from the PCI config space. This is the mechanism used on
   9 * Hyper-V to support PF/VF communication.
  10 */
  11#define IXGBE_HV_RESET_OFFSET           0x201
  12
  13static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
  14					     u32 *retmsg, u16 size)
  15{
  16	struct ixgbe_mbx_info *mbx = &hw->mbx;
  17	s32 retval = mbx->ops.write_posted(hw, msg, size);
  18
  19	if (retval)
  20		return retval;
  21
  22	return mbx->ops.read_posted(hw, retmsg, size);
  23}
  24
  25/**
  26 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  27 *  @hw: pointer to hardware structure
  28 *
  29 *  Starts the hardware by filling the bus info structure and media type, clears
  30 *  all on chip counters, initializes receive address registers, multicast
  31 *  table, VLAN filter table, calls routine to set up link and flow control
  32 *  settings, and leaves transmit and receive units disabled and uninitialized
  33 **/
  34static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
  35{
  36	/* Clear adapter stopped flag */
  37	hw->adapter_stopped = false;
  38
  39	return 0;
  40}
  41
  42/**
  43 *  ixgbevf_init_hw_vf - virtual function hardware initialization
  44 *  @hw: pointer to hardware structure
  45 *
  46 *  Initialize the hardware by resetting the hardware and then starting
  47 *  the hardware
  48 **/
  49static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  50{
  51	s32 status = hw->mac.ops.start_hw(hw);
  52
  53	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
  54
  55	return status;
  56}
  57
  58/**
  59 *  ixgbevf_reset_hw_vf - Performs hardware reset
  60 *  @hw: pointer to hardware structure
  61 *
  62 *  Resets the hardware by resetting the transmit and receive units, masks and
  63 *  clears all interrupts.
  64 **/
  65static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
  66{
  67	struct ixgbe_mbx_info *mbx = &hw->mbx;
  68	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
  69	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
  70	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
   71	u8 *addr = (u8 *)(&msgbuf[1]);
  72
  73	/* Call adapter stop to disable tx/rx and clear interrupts */
  74	hw->mac.ops.stop_adapter(hw);
  75
  76	/* reset the api version */
   77	hw->api_version = ixgbe_mbox_api_10;
  78
  79	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
  80	IXGBE_WRITE_FLUSH(hw);
  81
  82	/* we cannot reset while the RSTI / RSTD bits are asserted */
  83	while (!mbx->ops.check_for_rst(hw) && timeout) {
  84		timeout--;
  85		udelay(5);
  86	}
  87
  88	if (!timeout)
  89		return IXGBE_ERR_RESET_FAILED;
  90
  91	/* mailbox timeout can now become active */
  92	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
  93
  94	msgbuf[0] = IXGBE_VF_RESET;
  95	mbx->ops.write_posted(hw, msgbuf, 1);
  96
  97	mdelay(10);
  98
  99	/* set our "perm_addr" based on info provided by PF
 100	 * also set up the mc_filter_type which is piggy backed
 101	 * on the mac address in word 3
 102	 */
 103	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
 104	if (ret_val)
 105		return ret_val;
 106
 107	/* New versions of the PF may NACK the reset return message
 108	 * to indicate that no MAC address has yet been assigned for
 109	 * the VF.
 110	 */
 111	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
 112	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
 113		return IXGBE_ERR_INVALID_MAC_ADDR;
 114
 115	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
 116		ether_addr_copy(hw->mac.perm_addr, addr);
 117
 118	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 119
 120	return 0;
 121}
 122
 123/**
 124 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 125 * @hw: pointer to private hardware struct
 126 *
 127 * Hyper-V variant; the VF/PF communication is through the PCI
 128 * config space.
 129 */
 130static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 131{
 132#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
 133	struct ixgbevf_adapter *adapter = hw->back;
 134	int i;
 135
 136	for (i = 0; i < 6; i++)
 137		pci_read_config_byte(adapter->pdev,
 138				     (i + IXGBE_HV_RESET_OFFSET),
 139				     &hw->mac.perm_addr[i]);
 140	return 0;
 141#else
 142	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
 143	return -EOPNOTSUPP;
 144#endif
 145}
 146
 147/**
 148 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 149 *  @hw: pointer to hardware structure
 150 *
 151 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 152 *  disables transmit and receive units. The adapter_stopped flag is used by
 153 *  the shared code and drivers to determine if the adapter is in a stopped
 154 *  state and should not touch the hardware.
 155 **/
 156static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
 157{
 158	u32 number_of_queues;
 159	u32 reg_val;
 160	u16 i;
 161
 162	/* Set the adapter_stopped flag so other driver functions stop touching
 163	 * the hardware
 164	 */
 165	hw->adapter_stopped = true;
 166
  167	/* Disable the receive unit by stopping each queue */
 168	number_of_queues = hw->mac.max_rx_queues;
 169	for (i = 0; i < number_of_queues; i++) {
 170		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 171		if (reg_val & IXGBE_RXDCTL_ENABLE) {
 172			reg_val &= ~IXGBE_RXDCTL_ENABLE;
 173			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
 174		}
 175	}
 176
 177	IXGBE_WRITE_FLUSH(hw);
 178
  179	/* Clear interrupt mask to stop interrupts from being generated */
 180	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
 181
 182	/* Clear any pending interrupts */
 183	IXGBE_READ_REG(hw, IXGBE_VTEICR);
 184
 185	/* Disable the transmit unit.  Each queue must be disabled. */
 186	number_of_queues = hw->mac.max_tx_queues;
 187	for (i = 0; i < number_of_queues; i++) {
 188		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 189		if (reg_val & IXGBE_TXDCTL_ENABLE) {
 190			reg_val &= ~IXGBE_TXDCTL_ENABLE;
 191			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
 192		}
 193	}
 194
 195	return 0;
 196}
 197
 198/**
 199 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 200 *  @hw: pointer to hardware structure
 201 *  @mc_addr: the multicast address
 202 *
 203 *  Extracts the 12 bits, from a multicast address, to determine which
 204 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 205 *  incoming Rx multicast addresses, to determine the bit-vector to check in
  206 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 207 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 208 *  to mc_filter_type.
 209 **/
 210static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 211{
 212	u32 vector = 0;
 213
 214	switch (hw->mac.mc_filter_type) {
 215	case 0:   /* use bits [47:36] of the address */
 216		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 217		break;
 218	case 1:   /* use bits [46:35] of the address */
 219		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 220		break;
 221	case 2:   /* use bits [45:34] of the address */
 222		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 223		break;
 224	case 3:   /* use bits [43:32] of the address */
 225		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 226		break;
 227	default:  /* Invalid mc_filter_type */
 228		break;
 229	}
 230
 231	/* vector can only be 12-bits or boundary will be exceeded */
 232	vector &= 0xFFF;
 233	return vector;
 234}
 235
 236/**
 237 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 238 *  @hw: pointer to the HW structure
 239 *  @mac_addr: pointer to storage for retrieved MAC address
 240 **/
 241static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 242{
 243	ether_addr_copy(mac_addr, hw->mac.perm_addr);
 244
 245	return 0;
 246}
 247
 248static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 249{
 250	u32 msgbuf[3], msgbuf_chk;
 251	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 252	s32 ret_val;
 253
 254	memset(msgbuf, 0, sizeof(msgbuf));
 255	/* If index is one then this is the start of a new list and needs
  256	 * indication to the PF so it can do its own list management.
 257	 * If it is zero then that tells the PF to just clear all of
 258	 * this VF's macvlans and there is no new list.
 259	 */
 260	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
 261	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
 262	msgbuf_chk = msgbuf[0];
 263
 264	if (addr)
 265		ether_addr_copy(msg_addr, addr);
 266
 267	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 268					     ARRAY_SIZE(msgbuf));
 269	if (!ret_val) {
 270		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 271
 272		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
 273			return -ENOMEM;
 274	}
 275
 276	return ret_val;
 277}
 278
 279static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 280{
 281	return -EOPNOTSUPP;
 282}
 283
 284/**
 285 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 286 * @hw: pointer to hardware structure
 287 * @reta: buffer to fill with RETA contents.
 288 * @num_rx_queues: Number of Rx queues configured for this port
 289 *
 290 * The "reta" buffer should be big enough to contain 32 registers.
 291 *
 292 * Returns: 0 on success.
 293 *          if API doesn't support this operation - (-EOPNOTSUPP).
 294 */
 295int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
 296{
 297	int err, i, j;
 298	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 299	u32 *hw_reta = &msgbuf[1];
 300	u32 mask = 0;
 301
 302	/* We have to use a mailbox for 82599 and x540 devices only.
 303	 * For these devices RETA has 128 entries.
 304	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
 305	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
 306	 */
 307	int dwords = IXGBEVF_82599_RETA_SIZE / 16;
 308
 309	/* We support the RSS querying for 82599 and x540 devices only.
 310	 * Thus return an error if API doesn't support RETA querying or querying
 311	 * is not supported for this device type.
 312	 */
  313	switch (hw->api_version) {
 314	case ixgbe_mbox_api_14:
 315	case ixgbe_mbox_api_13:
 316	case ixgbe_mbox_api_12:
 317		if (hw->mac.type < ixgbe_mac_X550_vf)
 318			break;
 319		fallthrough;
 320	default:
 321		return -EOPNOTSUPP;
 322	}
 323
 324	msgbuf[0] = IXGBE_VF_GET_RETA;
 325
 326	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
 327
 328	if (err)
 329		return err;
 330
 331	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
 332
 333	if (err)
 334		return err;
 335
 336	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 337
 338	/* If the operation has been refused by a PF return -EPERM */
 339	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
 340		return -EPERM;
 341
 342	/* If we didn't get an ACK there must have been
 343	 * some sort of mailbox error so we should treat it
 344	 * as such.
 345	 */
 346	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
 347		return IXGBE_ERR_MBX;
 348
 349	/* ixgbevf doesn't support more than 2 queues at the moment */
 350	if (num_rx_queues > 1)
 351		mask = 0x1;
 352
 353	for (i = 0; i < dwords; i++)
 354		for (j = 0; j < 16; j++)
 355			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
 356
 357	return 0;
 358}
 359
 360/**
 361 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 362 * @hw: pointer to the HW structure
 363 * @rss_key: buffer to fill with RSS Hash Key contents.
 364 *
 365 * The "rss_key" buffer should be big enough to contain 10 registers.
 366 *
 367 * Returns: 0 on success.
 368 *          if API doesn't support this operation - (-EOPNOTSUPP).
 369 */
 370int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 371{
 372	int err;
 373	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 374
 375	/* We currently support the RSS Random Key retrieval for 82599 and x540
 376	 * devices only.
 377	 *
 378	 * Thus return an error if API doesn't support RSS Random Key retrieval
 379	 * or if the operation is not supported for this device type.
 380	 */
  381	switch (hw->api_version) {
 382	case ixgbe_mbox_api_14:
 383	case ixgbe_mbox_api_13:
 384	case ixgbe_mbox_api_12:
 385		if (hw->mac.type < ixgbe_mac_X550_vf)
 386			break;
 387		fallthrough;
 388	default:
 389		return -EOPNOTSUPP;
 390	}
 391
 392	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
 393	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
 394
 395	if (err)
 396		return err;
 397
 398	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
 399
 400	if (err)
 401		return err;
 402
 403	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 404
 405	/* If the operation has been refused by a PF return -EPERM */
 406	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
 407		return -EPERM;
 408
 409	/* If we didn't get an ACK there must have been
 410	 * some sort of mailbox error so we should treat it
 411	 * as such.
 412	 */
 413	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
 414		return IXGBE_ERR_MBX;
 415
 416	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
 417
 418	return 0;
 419}
 420
 421/**
 422 *  ixgbevf_set_rar_vf - set device MAC address
 423 *  @hw: pointer to hardware structure
 424 *  @index: Receive address register to write
 425 *  @addr: Address to put into receive address register
 426 *  @vmdq: Unused in this implementation
 427 **/
 428static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 429			      u32 vmdq)
 430{
 431	u32 msgbuf[3];
 432	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 433	s32 ret_val;
 434
 435	memset(msgbuf, 0, sizeof(msgbuf));
 436	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
 437	ether_addr_copy(msg_addr, addr);
 438
 439	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 440					     ARRAY_SIZE(msgbuf));
 441	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 442
 443	/* if nacked the address was rejected, use "perm_addr" */
 444	if (!ret_val &&
 445	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
 446		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
 447		return IXGBE_ERR_MBX;
 448	}
 449
 450	return ret_val;
 451}
 452
 453/**
 454 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 455 *  @hw: pointer to hardware structure
 456 *  @index: Receive address register to write
 457 *  @addr: Address to put into receive address register
 458 *  @vmdq: Unused in this implementation
 459 *
 460 * We don't really allow setting the device MAC address. However,
 461 * if the address being set is the permanent MAC address we will
 462 * permit that.
 463 **/
 464static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 465				 u32 vmdq)
 466{
 467	if (ether_addr_equal(addr, hw->mac.perm_addr))
 468		return 0;
 469
 470	return -EOPNOTSUPP;
 471}
 472
 473/**
 474 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 475 *  @hw: pointer to the HW structure
 476 *  @netdev: pointer to net device structure
 477 *
 478 *  Updates the Multicast Table Array.
 479 **/
 480static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 481					  struct net_device *netdev)
 482{
 483	struct netdev_hw_addr *ha;
 484	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 485	u16 *vector_list = (u16 *)&msgbuf[1];
 486	u32 cnt, i;
 487
 488	/* Each entry in the list uses 1 16 bit word.  We have 30
 489	 * 16 bit words available in our HW msg buffer (minus 1 for the
 490	 * msg type).  That's 30 hash values if we pack 'em right.  If
 491	 * there are more than 30 MC addresses to add then punt the
 492	 * extras for now and then add code to handle more than 30 later.
  493	 * It would be unusual for a server to request that many multicast
  494	 * addresses except in large enterprise network environments.
 495	 */
 496
 497	cnt = netdev_mc_count(netdev);
 498	if (cnt > 30)
 499		cnt = 30;
 500	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
 501	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
 502
 503	i = 0;
 504	netdev_for_each_mc_addr(ha, netdev) {
 505		if (i == cnt)
 506			break;
 507		if (is_link_local_ether_addr(ha->addr))
 508			continue;
 509
 510		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
 511	}
 512
 513	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 514			IXGBE_VFMAILBOX_SIZE);
 515}
 516
 517/**
 518 * ixgbevf_hv_update_mc_addr_list_vf - stub
 519 * @hw: unused
 520 * @netdev: unused
 521 *
 522 * Hyper-V variant - just a stub.
 523 */
 524static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 525					     struct net_device *netdev)
 526{
 527	return -EOPNOTSUPP;
 528}
 529
 530/**
 531 *  ixgbevf_update_xcast_mode - Update Multicast mode
 532 *  @hw: pointer to the HW structure
 533 *  @xcast_mode: new multicast mode
 534 *
 535 *  Updates the Multicast Mode of VF.
 536 **/
 537static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 538{
 539	u32 msgbuf[2];
 540	s32 err;
 541
 542	switch (hw->api_version) {
 543	case ixgbe_mbox_api_12:
 544		/* promisc introduced in 1.3 version */
 545		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
 546			return -EOPNOTSUPP;
  547		fallthrough;
 548	case ixgbe_mbox_api_14:
 549	case ixgbe_mbox_api_13:
 550		break;
 551	default:
 552		return -EOPNOTSUPP;
 553	}
 554
 555	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
 556	msgbuf[1] = xcast_mode;
 557
 558	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 559					 ARRAY_SIZE(msgbuf));
 560	if (err)
 561		return err;
 562
 563	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 564	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
 565		return -EPERM;
 566
 567	return 0;
 568}
 569
 570/**
 571 * ixgbevf_hv_update_xcast_mode - stub
 572 * @hw: unused
 573 * @xcast_mode: unused
 574 *
 575 * Hyper-V variant - just a stub.
 576 */
 577static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 578{
 579	return -EOPNOTSUPP;
 580}
 581
  582/**
 583 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 584 *  @hw: pointer to the HW structure
 585 *  @vlan: 12 bit VLAN ID
 586 *  @vind: unused by VF drivers
 587 *  @vlan_on: if true then set bit, else clear bit
 588 **/
 589static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 590			       bool vlan_on)
 591{
 592	u32 msgbuf[2];
 593	s32 err;
 594
 595	msgbuf[0] = IXGBE_VF_SET_VLAN;
 596	msgbuf[1] = vlan;
 597	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
 598	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 599
 600	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 601					 ARRAY_SIZE(msgbuf));
 602	if (err)
 603		goto mbx_err;
 604
 605	/* remove extra bits from the message */
 606	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 607	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
 608
 609	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
 610		err = IXGBE_ERR_INVALID_ARGUMENT;
 611
 612mbx_err:
 613	return err;
 614}
 615
 616/**
  617 * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
 618 * @hw: unused
 619 * @vlan: unused
 620 * @vind: unused
 621 * @vlan_on: unused
 622 */
 623static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 624				  bool vlan_on)
 625{
 626	return -EOPNOTSUPP;
 627}
 628
 629/**
 630 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 631 *  @hw: pointer to hardware structure
 632 *  @speed: Unused in this implementation
 633 *  @autoneg: Unused in this implementation
 634 *  @autoneg_wait_to_complete: Unused in this implementation
 635 *
 636 *  Do nothing and return success.  VF drivers are not allowed to change
 637 *  global settings.  Maintained for driver compatibility.
 638 **/
 639static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
 640				     ixgbe_link_speed speed, bool autoneg,
 641				     bool autoneg_wait_to_complete)
 642{
 643	return 0;
 644}
 645
 646/**
 647 *  ixgbevf_check_mac_link_vf - Get link/speed status
 648 *  @hw: pointer to hardware structure
 649 *  @speed: pointer to link speed
  650 *  @link_up: true if link is up, false otherwise
 651 *  @autoneg_wait_to_complete: unused
 652 *
 653 *  Reads the links register to determine if link is up and the current speed
 654 **/
 655static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 656				     ixgbe_link_speed *speed,
 657				     bool *link_up,
 658				     bool autoneg_wait_to_complete)
 659{
 660	struct ixgbe_mbx_info *mbx = &hw->mbx;
 661	struct ixgbe_mac_info *mac = &hw->mac;
 662	s32 ret_val = 0;
 663	u32 links_reg;
 664	u32 in_msg = 0;
 665
 666	/* If we were hit with a reset drop the link */
 667	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 668		mac->get_link_status = true;
 669
 670	if (!mac->get_link_status)
 671		goto out;
 672
 673	/* if link status is down no point in checking to see if pf is up */
 674	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 675	if (!(links_reg & IXGBE_LINKS_UP))
 676		goto out;
 677
 678	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 679	 * before the link status is correct
 680	 */
 681	if (mac->type == ixgbe_mac_82599_vf) {
 682		int i;
 683
 684		for (i = 0; i < 5; i++) {
 685			udelay(100);
 686			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 687
 688			if (!(links_reg & IXGBE_LINKS_UP))
 689				goto out;
 690		}
 691	}
 692
 693	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 694	case IXGBE_LINKS_SPEED_10G_82599:
 695		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 696		break;
 697	case IXGBE_LINKS_SPEED_1G_82599:
 698		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 699		break;
 700	case IXGBE_LINKS_SPEED_100_82599:
 701		*speed = IXGBE_LINK_SPEED_100_FULL;
 702		break;
 703	}
 704
 705	/* if the read failed it could just be a mailbox collision, best wait
 706	 * until we are called again and don't report an error
 707	 */
  708	if (mbx->ops.read(hw, &in_msg, 1))
  709		goto out;
 710
 711	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
  712		/* msg is not CTS; if it is a NACK we must have lost CTS status */
 713		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
 714			ret_val = -1;
 715		goto out;
 716	}
 717
 718	/* the pf is talking, if we timed out in the past we reinit */
 719	if (!mbx->timeout) {
 720		ret_val = -1;
 721		goto out;
 722	}
 723
 724	/* if we passed all the tests above then the link is up and we no
 725	 * longer need to check for link
 726	 */
 727	mac->get_link_status = false;
 728
 729out:
 730	*link_up = !mac->get_link_status;
 731	return ret_val;
 732}
 733
 734/**
 735 * ixgbevf_hv_check_mac_link_vf - check link
 736 * @hw: pointer to private hardware struct
 737 * @speed: pointer to link speed
  738 * @link_up: true if link is up, false otherwise
 739 * @autoneg_wait_to_complete: unused
 740 *
 741 * Hyper-V variant; there is no mailbox communication.
 742 */
 743static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
 744					ixgbe_link_speed *speed,
 745					bool *link_up,
 746					bool autoneg_wait_to_complete)
 747{
 748	struct ixgbe_mbx_info *mbx = &hw->mbx;
 749	struct ixgbe_mac_info *mac = &hw->mac;
 750	u32 links_reg;
 751
 752	/* If we were hit with a reset drop the link */
 753	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 754		mac->get_link_status = true;
 755
 756	if (!mac->get_link_status)
 757		goto out;
 758
 759	/* if link status is down no point in checking to see if pf is up */
 760	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 761	if (!(links_reg & IXGBE_LINKS_UP))
 762		goto out;
 763
 764	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 765	 * before the link status is correct
 766	 */
 767	if (mac->type == ixgbe_mac_82599_vf) {
 768		int i;
 769
 770		for (i = 0; i < 5; i++) {
 771			udelay(100);
 772			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 773
 774			if (!(links_reg & IXGBE_LINKS_UP))
 775				goto out;
 776		}
 777	}
 778
 779	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 780	case IXGBE_LINKS_SPEED_10G_82599:
 781		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 782		break;
 783	case IXGBE_LINKS_SPEED_1G_82599:
 784		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 785		break;
 786	case IXGBE_LINKS_SPEED_100_82599:
 787		*speed = IXGBE_LINK_SPEED_100_FULL;
 788		break;
 789	}
 790
 791	/* if we passed all the tests above then the link is up and we no
 792	 * longer need to check for link
 793	 */
 794	mac->get_link_status = false;
 795
 796out:
 797	*link_up = !mac->get_link_status;
 798	return 0;
 799}
 800
 801/**
 802 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 803 *  @hw: pointer to the HW structure
 804 *  @max_size: value to assign to max frame size
 805 **/
 806static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 807{
 808	u32 msgbuf[2];
 809	s32 ret_val;
 810
 811	msgbuf[0] = IXGBE_VF_SET_LPE;
 812	msgbuf[1] = max_size;
 813
 814	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 815					     ARRAY_SIZE(msgbuf));
 816	if (ret_val)
 817		return ret_val;
 818	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
 819	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
 820		return IXGBE_ERR_MBX;
 821
 822	return 0;
 823}
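
/* Editor's note: illustrative sketch, not part of the driver. It shows how
 * the function above (installed later as the set_rlpml operation) might be
 * driven from an MTU value; the helper name and the exact frame-size math
 * are assumptions, and ETH_HLEN/ETH_FCS_LEN are assumed to be available via
 * the usual networking headers.
 */
static inline s32 example_vf_set_max_frame(struct ixgbe_hw *hw,
					   unsigned int mtu)
{
	/* L2 header plus FCS; add VLAN_HLEN if tagged frames must also fit */
	u16 max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

	return hw->mac.ops.set_rlpml(hw, max_frame);
}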
 824
 825/**
 826 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 827 * @hw: pointer to the HW structure
 828 * @max_size: value to assign to max frame size
 829 * Hyper-V variant.
 830 **/
 831static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 832{
 833	u32 reg;
 834
 835	/* If we are on Hyper-V, we implement this functionality
 836	 * differently.
 837	 */
 838	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
 839	/* CRC == 4 */
 840	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
 841	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
 842
 843	return 0;
 844}
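
/* Editor's note: worked example for the register write above, added for
 * clarity. Whatever max_size the caller passes, the value programmed into
 * the RLPML field of VFRXDCTL(0) is max_size + 4 (the hardware also counts
 * the 4-byte CRC), with IXGBE_RXDCTL_RLPML_EN OR'd in to enable the limit;
 * e.g. max_size = 1518 results in a limit of 1522 bytes.
 */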
 845
 846/**
 847 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 848 *  @hw: pointer to the HW structure
 849 *  @api: integer containing requested API version
 850 **/
 851static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 852{
 853	int err;
 854	u32 msg[3];
 855
 856	/* Negotiate the mailbox API version */
 857	msg[0] = IXGBE_VF_API_NEGOTIATE;
 858	msg[1] = api;
 859	msg[2] = 0;
 860
 861	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 862	if (!err) {
 863		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 864
 865		/* Store value and return 0 on success */
 866		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS)) {
 867			hw->api_version = api;
 868			return 0;
 869		}
 870
 871		err = IXGBE_ERR_INVALID_ARGUMENT;
 872	}
 873
 874	return err;
 875}
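
/* Editor's note: illustrative sketch, not part of the driver. It shows one
 * way a caller could use the negotiation routine above, walking from the
 * newest mailbox API it understands down to the legacy version; the helper
 * name and the particular version list are assumptions for the example.
 */
static inline void example_vf_negotiate_api(struct ixgbe_hw *hw)
{
	static const int api[] = {
		ixgbe_mbox_api_14,
		ixgbe_mbox_api_13,
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
	};
	unsigned int i;

	/* stop at the first version the PF accepts */
	for (i = 0; i < ARRAY_SIZE(api); i++)
		if (!hw->mac.ops.negotiate_api_version(hw, api[i]))
			break;
}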
 876
 877/**
 878 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 879 *  @hw: pointer to the HW structure
 880 *  @api: integer containing requested API version
 881 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 882 **/
 883static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 884{
 885	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
 886	if (api != ixgbe_mbox_api_10)
 887		return IXGBE_ERR_INVALID_ARGUMENT;
 888
 889	return 0;
 890}
 891
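/**
 * ixgbevf_get_queues - Fetch queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: where the number of traffic classes reported by the PF is stored
 * @default_tc: where the default traffic class index is stored
 *
 * Editor's note: this kernel-doc block was added for documentation only; the
 * descriptions are inferred from the function body below. Returns 0 on
 * success or when the negotiated API does not support the query, otherwise a
 * mailbox error code.
 **/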
 892int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 893		       unsigned int *default_tc)
 894{
 895	int err;
 896	u32 msg[5];
 897
 898	/* do nothing if API doesn't support ixgbevf_get_queues */
 899	switch (hw->api_version) {
 900	case ixgbe_mbox_api_11:
 901	case ixgbe_mbox_api_12:
 902	case ixgbe_mbox_api_13:
 903	case ixgbe_mbox_api_14:
 904		break;
 905	default:
 906		return 0;
 907	}
 908
 909	/* Fetch queue configuration from the PF */
 910	msg[0] = IXGBE_VF_GET_QUEUE;
 911	msg[1] = msg[2] = msg[3] = msg[4] = 0;
 912
 913	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 914	if (!err) {
 915		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 916
 917		/* if we didn't get an ACK there must have been
 918		 * some sort of mailbox error so we should treat it
 919		 * as such
 920		 */
 921		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
 922			return IXGBE_ERR_MBX;
 923
 924		/* record and validate values from message */
 925		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
 926		if (hw->mac.max_tx_queues == 0 ||
 927		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
 928			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
 929
 930		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
 931		if (hw->mac.max_rx_queues == 0 ||
 932		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
 933			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
 934
 935		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
 936		/* in case of unknown state assume we cannot tag frames */
 937		if (*num_tcs > hw->mac.max_rx_queues)
 938			*num_tcs = 1;
 939
 940		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
 941		/* default to queue 0 on out-of-bounds queue number */
 942		if (*default_tc >= hw->mac.max_tx_queues)
 943			*default_tc = 0;
 944	}
 945
 946	return err;
 947}
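
/* Editor's note: illustrative sketch, not part of the driver. It shows how
 * the values produced by ixgbevf_get_queues() above might be consumed; the
 * helper name is hypothetical and real callers would feed these values into
 * their own queue/TC bookkeeping rather than just logging them.
 */
static inline void example_vf_report_queues(struct ixgbe_hw *hw)
{
	unsigned int num_tcs = 1, default_tc = 0;

	/* keep the defaults above if the PF cannot be queried */
	if (ixgbevf_get_queues(hw, &num_tcs, &default_tc))
		return;

	pr_info("VF queues: %u Tx / %u Rx, %u TC(s), default TC %u\n",
		hw->mac.max_tx_queues, hw->mac.max_rx_queues,
		num_tcs, default_tc);
}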
 948
 949static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 950	.init_hw		= ixgbevf_init_hw_vf,
 951	.reset_hw		= ixgbevf_reset_hw_vf,
 952	.start_hw		= ixgbevf_start_hw_vf,
 953	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
 954	.stop_adapter		= ixgbevf_stop_hw_vf,
 955	.setup_link		= ixgbevf_setup_mac_link_vf,
 956	.check_link		= ixgbevf_check_mac_link_vf,
 957	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
 958	.set_rar		= ixgbevf_set_rar_vf,
 959	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
 960	.update_xcast_mode	= ixgbevf_update_xcast_mode,
 961	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
 962	.set_vfta		= ixgbevf_set_vfta_vf,
 963	.set_rlpml		= ixgbevf_set_rlpml_vf,
 964};
 965
 966static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
 967	.init_hw		= ixgbevf_init_hw_vf,
 968	.reset_hw		= ixgbevf_hv_reset_hw_vf,
 969	.start_hw		= ixgbevf_start_hw_vf,
 970	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
 971	.stop_adapter		= ixgbevf_stop_hw_vf,
 972	.setup_link		= ixgbevf_setup_mac_link_vf,
 973	.check_link		= ixgbevf_hv_check_mac_link_vf,
 974	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
 975	.set_rar		= ixgbevf_hv_set_rar_vf,
 976	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
 977	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
 978	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
 979	.set_vfta		= ixgbevf_hv_set_vfta_vf,
 980	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
 981};
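
/* Editor's note: illustrative sketch, not part of the driver. Callers do not
 * use the two ops tables above directly; the probe path is assumed to copy
 * the table referenced by the matching ixgbevf_info entry below into
 * hw->mac.ops, after which everything is reached through that indirection,
 * as in this hypothetical helper.
 */
static inline s32 example_vf_reinit(struct ixgbe_hw *hw)
{
	s32 err;

	/* reset the VF through whichever table was installed ... */
	err = hw->mac.ops.reset_hw(hw);
	if (err)
		return err;

	/* ... then run the normal init path from the same table */
	return hw->mac.ops.init_hw(hw);
}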
 982
 983const struct ixgbevf_info ixgbevf_82599_vf_info = {
 984	.mac = ixgbe_mac_82599_vf,
 985	.mac_ops = &ixgbevf_mac_ops,
 986};
 987
 988const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
 989	.mac = ixgbe_mac_82599_vf,
 990	.mac_ops = &ixgbevf_hv_mac_ops,
 991};
 992
 993const struct ixgbevf_info ixgbevf_X540_vf_info = {
 994	.mac = ixgbe_mac_X540_vf,
 995	.mac_ops = &ixgbevf_mac_ops,
 996};
 997
 998const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
 999	.mac = ixgbe_mac_X540_vf,
1000	.mac_ops = &ixgbevf_hv_mac_ops,
1001};
1002
1003const struct ixgbevf_info ixgbevf_X550_vf_info = {
1004	.mac = ixgbe_mac_X550_vf,
1005	.mac_ops = &ixgbevf_mac_ops,
1006};
1007
1008const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
1009	.mac = ixgbe_mac_X550_vf,
1010	.mac_ops = &ixgbevf_hv_mac_ops,
1011};
1012
1013const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1014	.mac = ixgbe_mac_X550EM_x_vf,
1015	.mac_ops = &ixgbevf_mac_ops,
1016};
1017
1018const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1019	.mac = ixgbe_mac_X550EM_x_vf,
1020	.mac_ops = &ixgbevf_hv_mac_ops,
1021};
1022
1023const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
1024	.mac = ixgbe_mac_x550em_a_vf,
1025	.mac_ops = &ixgbevf_mac_ops,
1026};