   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#include "vf.h"
   5#include "ixgbevf.h"
   6
   7/* On Hyper-V, to reset, we need to read from this offset
   8 * from the PCI config space. This is the mechanism used on
   9 * Hyper-V to support PF/VF communication.
  10 */
  11#define IXGBE_HV_RESET_OFFSET           0x201
  12
  13static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
  14					     u32 *retmsg, u16 size)
  15{
   16	s32 retval = ixgbevf_write_mbx(hw, msg, size);
  17
  18	if (retval)
  19		return retval;
  20
  21	return ixgbevf_poll_mbx(hw, retmsg, size);
  22}
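/* This helper is the request/response primitive used by every PF mailbox
 * call below: post the request with ixgbevf_write_mbx(), then poll the
 * mailbox and copy the PF's reply into retmsg.  Callers typically reuse
 * the request buffer for the reply, mask off IXGBE_VT_MSGTYPE_CTS and
 * compare against the SUCCESS/FAILURE variants of the command.
 * Illustrative call pattern (a sketch mirroring ixgbevf_set_vfta_vf
 * further down, not additional driver code):
 *
 *	u32 msgbuf[2] = { IXGBE_VF_SET_VLAN, vlan };
 *	s32 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 *					     ARRAY_SIZE(msgbuf));
 *
 *	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 *	if (!err && msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
 *		err = IXGBE_ERR_INVALID_ARGUMENT;
 */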
  23
  24/**
  25 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  26 *  @hw: pointer to hardware structure
  27 *
  28 *  Starts the hardware by filling the bus info structure and media type, clears
  29 *  all on chip counters, initializes receive address registers, multicast
  30 *  table, VLAN filter table, calls routine to set up link and flow control
  31 *  settings, and leaves transmit and receive units disabled and uninitialized
  32 **/
  33static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
  34{
  35	/* Clear adapter stopped flag */
  36	hw->adapter_stopped = false;
  37
  38	return 0;
  39}
  40
  41/**
  42 *  ixgbevf_init_hw_vf - virtual function hardware initialization
  43 *  @hw: pointer to hardware structure
  44 *
  45 *  Initialize the hardware by resetting the hardware and then starting
  46 *  the hardware
  47 **/
  48static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  49{
  50	s32 status = hw->mac.ops.start_hw(hw);
  51
  52	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
  53
  54	return status;
  55}
  56
  57/**
  58 *  ixgbevf_reset_hw_vf - Performs hardware reset
  59 *  @hw: pointer to hardware structure
  60 *
  61 *  Resets the hardware by resetting the transmit and receive units, masks and
  62 *  clears all interrupts.
  63 **/
  64static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
  65{
  66	struct ixgbe_mbx_info *mbx = &hw->mbx;
   67	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
  68	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
  69	u8 *addr = (u8 *)(&msgbuf[1]);
  70	s32 ret_val;
  71
  72	/* Call adapter stop to disable tx/rx and clear interrupts */
  73	hw->mac.ops.stop_adapter(hw);
  74
  75	/* reset the api version */
  76	hw->api_version = ixgbe_mbox_api_10;
  77	hw->mbx.ops.init_params(hw);
  78	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
  79	       sizeof(struct ixgbe_mbx_operations));
  80
  81	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
  82	IXGBE_WRITE_FLUSH(hw);
  83
  84	/* we cannot reset while the RSTI / RSTD bits are asserted */
  85	while (!mbx->ops.check_for_rst(hw) && timeout) {
  86		timeout--;
  87		udelay(5);
  88	}
  89
  90	if (!timeout)
  91		return IXGBE_ERR_RESET_FAILED;
  92
  93	/* mailbox timeout can now become active */
  94	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
  95
  96	msgbuf[0] = IXGBE_VF_RESET;
  97	ixgbevf_write_mbx(hw, msgbuf, 1);
  98
  99	mdelay(10);
 100
 101	/* set our "perm_addr" based on info provided by PF
 102	 * also set up the mc_filter_type which is piggy backed
 103	 * on the mac address in word 3
 104	 */
 105	ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
 106	if (ret_val)
 107		return ret_val;
 108
 109	/* New versions of the PF may NACK the reset return message
 110	 * to indicate that no MAC address has yet been assigned for
 111	 * the VF.
 112	 */
 113	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
 114	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
 115		return IXGBE_ERR_INVALID_MAC_ADDR;
 116
 117	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
 118		ether_addr_copy(hw->mac.perm_addr, addr);
 119
 120	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 121
 122	return 0;
 123}
 124
 125/**
 126 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 127 * @hw: pointer to private hardware struct
 128 *
 129 * Hyper-V variant; the VF/PF communication is through the PCI
  130 * config space.
 131 */
 132static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 133{
 134#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
 135	struct ixgbevf_adapter *adapter = hw->back;
 136	int i;
 137
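	/* Per the header comment at the top of the file, a read from
	 * IXGBE_HV_RESET_OFFSET is how a reset is requested on Hyper-V; the
	 * six bytes read back are stored as the VF's permanent MAC address.
	 */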
 138	for (i = 0; i < 6; i++)
 139		pci_read_config_byte(adapter->pdev,
 140				     (i + IXGBE_HV_RESET_OFFSET),
 141				     &hw->mac.perm_addr[i]);
 142	return 0;
 143#else
 144	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
 145	return -EOPNOTSUPP;
 146#endif
 147}
 148
 149/**
 150 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 151 *  @hw: pointer to hardware structure
 152 *
 153 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 154 *  disables transmit and receive units. The adapter_stopped flag is used by
 155 *  the shared code and drivers to determine if the adapter is in a stopped
 156 *  state and should not touch the hardware.
 157 **/
 158static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
 159{
 160	u32 number_of_queues;
 161	u32 reg_val;
 162	u16 i;
 163
 164	/* Set the adapter_stopped flag so other driver functions stop touching
 165	 * the hardware
 166	 */
 167	hw->adapter_stopped = true;
 168
  169	/* Disable the receive unit by stopping each queue */
 170	number_of_queues = hw->mac.max_rx_queues;
 171	for (i = 0; i < number_of_queues; i++) {
 172		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 173		if (reg_val & IXGBE_RXDCTL_ENABLE) {
 174			reg_val &= ~IXGBE_RXDCTL_ENABLE;
 175			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
 176		}
 177	}
 178
 179	IXGBE_WRITE_FLUSH(hw);
 180
  181	/* Clear interrupt mask to stop interrupts from being generated */
 182	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
 183
 184	/* Clear any pending interrupts */
 185	IXGBE_READ_REG(hw, IXGBE_VTEICR);
 186
 187	/* Disable the transmit unit.  Each queue must be disabled. */
 188	number_of_queues = hw->mac.max_tx_queues;
 189	for (i = 0; i < number_of_queues; i++) {
 190		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 191		if (reg_val & IXGBE_TXDCTL_ENABLE) {
 192			reg_val &= ~IXGBE_TXDCTL_ENABLE;
 193			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
 194		}
 195	}
 196
 197	return 0;
 198}
 199
 200/**
 201 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 202 *  @hw: pointer to hardware structure
 203 *  @mc_addr: the multicast address
 204 *
  205 *  Extracts the 12 bits from a multicast address that determine which
  206 *  bit-vector to set in the multicast table. The hardware uses 12 bits from
  207 *  incoming Rx multicast addresses to determine the bit-vector to check in
  208 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 209 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 210 *  to mc_filter_type.
 211 **/
 212static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 213{
 214	u32 vector = 0;
 215
 216	switch (hw->mac.mc_filter_type) {
 217	case 0:   /* use bits [47:36] of the address */
 218		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 219		break;
 220	case 1:   /* use bits [46:35] of the address */
 221		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 222		break;
 223	case 2:   /* use bits [45:34] of the address */
 224		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 225		break;
 226	case 3:   /* use bits [43:32] of the address */
 227		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 228		break;
 229	default:  /* Invalid mc_filter_type */
 230		break;
 231	}
 232
 233	/* vector can only be 12-bits or boundary will be exceeded */
 234	vector &= 0xFFF;
 235	return vector;
 236}
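/* Worked example (illustrative only): for the all-hosts multicast address
 * 01:00:5e:00:00:01 with mc_filter_type 0, mc_addr[4] = 0x00 and
 * mc_addr[5] = 0x01, so vector = (0x00 >> 4) | (0x01 << 4) = 0x010.  That
 * 12-bit value is what ixgbevf_update_mc_addr_list_vf() packs into the
 * IXGBE_VF_SET_MULTICAST message below.
 */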
 237
 238/**
 239 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 240 *  @hw: pointer to the HW structure
 241 *  @mac_addr: pointer to storage for retrieved MAC address
 242 **/
 243static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 244{
 245	ether_addr_copy(mac_addr, hw->mac.perm_addr);
 246
 247	return 0;
 248}
 249
 250static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 251{
 252	u32 msgbuf[3], msgbuf_chk;
 253	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 254	s32 ret_val;
 255
 256	memset(msgbuf, 0, sizeof(msgbuf));
 257	/* If index is one then this is the start of a new list and needs
  258	 * indication to the PF so it can do its own list management.
 259	 * If it is zero then that tells the PF to just clear all of
 260	 * this VF's macvlans and there is no new list.
 261	 */
 262	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
 263	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
 264	msgbuf_chk = msgbuf[0];
 265
 266	if (addr)
 267		ether_addr_copy(msg_addr, addr);
 268
 269	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 270					     ARRAY_SIZE(msgbuf));
 271	if (!ret_val) {
 272		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 273
 274		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
 275			return -ENOMEM;
 276	}
 277
 278	return ret_val;
 279}
 280
 281static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 282{
 283	return -EOPNOTSUPP;
 284}
 285
 286/**
 287 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 288 * @hw: pointer to hardware structure
 289 * @reta: buffer to fill with RETA contents.
 290 * @num_rx_queues: Number of Rx queues configured for this port
 291 *
 292 * The "reta" buffer should be big enough to contain 32 registers.
 293 *
 294 * Returns: 0 on success.
 295 *          if API doesn't support this operation - (-EOPNOTSUPP).
 296 */
 297int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
 298{
 299	int err, i, j;
 300	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 301	u32 *hw_reta = &msgbuf[1];
 302	u32 mask = 0;
 303
 304	/* We have to use a mailbox for 82599 and x540 devices only.
 305	 * For these devices RETA has 128 entries.
 306	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
 307	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
 308	 */
 309	int dwords = IXGBEVF_82599_RETA_SIZE / 16;
 310
 311	/* We support the RSS querying for 82599 and x540 devices only.
 312	 * Thus return an error if API doesn't support RETA querying or querying
 313	 * is not supported for this device type.
 314	 */
 315	switch (hw->api_version) {
 316	case ixgbe_mbox_api_15:
 317	case ixgbe_mbox_api_14:
 318	case ixgbe_mbox_api_13:
 319	case ixgbe_mbox_api_12:
 320		if (hw->mac.type < ixgbe_mac_X550_vf)
 321			break;
 322		fallthrough;
 323	default:
 324		return -EOPNOTSUPP;
 325	}
 326
 327	msgbuf[0] = IXGBE_VF_GET_RETA;
 328
 329	err = ixgbevf_write_mbx(hw, msgbuf, 1);
 330
 331	if (err)
 332		return err;
 333
 334	err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
 335
 336	if (err)
 337		return err;
 338
 339	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 340
 341	/* If the operation has been refused by a PF return -EPERM */
 342	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
 343		return -EPERM;
 344
 345	/* If we didn't get an ACK there must have been
 346	 * some sort of mailbox error so we should treat it
 347	 * as such.
 348	 */
 349	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
 350		return IXGBE_ERR_MBX;
 351
 352	/* ixgbevf doesn't support more than 2 queues at the moment */
 353	if (num_rx_queues > 1)
 354		mask = 0x1;
 355
 356	for (i = 0; i < dwords; i++)
 357		for (j = 0; j < 16; j++)
 358			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
 359
 360	return 0;
 361}
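/* Decode sketch: with the 128-entry RETA described above, dwords = 128 / 16
 * = 8, so the PF's reply is one status word plus 8 packed DWORDs.  Entry j
 * of DWORD i occupies bits [2*j+1:2*j] and is extracted with
 * (hw_reta[i] >> (2 * j)) & mask, where mask is at most 0x1 because this
 * VF driver supports no more than 2 RSS queues.
 */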
 362
 363/**
 364 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 365 * @hw: pointer to the HW structure
 366 * @rss_key: buffer to fill with RSS Hash Key contents.
 367 *
 368 * The "rss_key" buffer should be big enough to contain 10 registers.
 369 *
 370 * Returns: 0 on success.
 371 *          if API doesn't support this operation - (-EOPNOTSUPP).
 372 */
 373int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 374{
 375	int err;
 376	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 377
 378	/* We currently support the RSS Random Key retrieval for 82599 and x540
 379	 * devices only.
 380	 *
 381	 * Thus return an error if API doesn't support RSS Random Key retrieval
 382	 * or if the operation is not supported for this device type.
 383	 */
 384	switch (hw->api_version) {
 385	case ixgbe_mbox_api_15:
 386	case ixgbe_mbox_api_14:
 387	case ixgbe_mbox_api_13:
 388	case ixgbe_mbox_api_12:
 389		if (hw->mac.type < ixgbe_mac_X550_vf)
 390			break;
 391		fallthrough;
 392	default:
 393		return -EOPNOTSUPP;
 394	}
 395
 396	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
 397	err = ixgbevf_write_mbx(hw, msgbuf, 1);
 398
 399	if (err)
 400		return err;
 401
 402	err = ixgbevf_poll_mbx(hw, msgbuf, 11);
 403
 404	if (err)
 405		return err;
 406
 407	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 408
 409	/* If the operation has been refused by a PF return -EPERM */
 410	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
 411		return -EPERM;
 412
 413	/* If we didn't get an ACK there must have been
 414	 * some sort of mailbox error so we should treat it
 415	 * as such.
 416	 */
 417	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
 418		return IXGBE_ERR_MBX;
 419
 420	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
 421
 422	return 0;
 423}
 424
 425/**
 426 *  ixgbevf_set_rar_vf - set device MAC address
 427 *  @hw: pointer to hardware structure
 428 *  @index: Receive address register to write
 429 *  @addr: Address to put into receive address register
 430 *  @vmdq: Unused in this implementation
 431 **/
 432static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 433			      u32 vmdq)
 434{
 435	u32 msgbuf[3];
 436	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 437	s32 ret_val;
 438
 439	memset(msgbuf, 0, sizeof(msgbuf));
 440	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
 441	ether_addr_copy(msg_addr, addr);
 442
 443	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 444					     ARRAY_SIZE(msgbuf));
 445	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 446
 447	/* if nacked the address was rejected, use "perm_addr" */
 448	if (!ret_val &&
 449	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
 450		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
 451		return IXGBE_ERR_MBX;
 452	}
 453
 454	return ret_val;
 455}
 456
 457/**
 458 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 459 *  @hw: pointer to hardware structure
 460 *  @index: Receive address register to write
 461 *  @addr: Address to put into receive address register
 462 *  @vmdq: Unused in this implementation
 463 *
 464 * We don't really allow setting the device MAC address. However,
 465 * if the address being set is the permanent MAC address we will
 466 * permit that.
 467 **/
 468static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 469				 u32 vmdq)
 470{
 471	if (ether_addr_equal(addr, hw->mac.perm_addr))
 472		return 0;
 473
 474	return -EOPNOTSUPP;
 475}
 476
 477/**
 478 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 479 *  @hw: pointer to the HW structure
 480 *  @netdev: pointer to net device structure
 481 *
 482 *  Updates the Multicast Table Array.
 483 **/
 484static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 485					  struct net_device *netdev)
 486{
 487	struct netdev_hw_addr *ha;
 488	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 489	u16 *vector_list = (u16 *)&msgbuf[1];
 490	u32 cnt, i;
 491
 492	/* Each entry in the list uses 1 16 bit word.  We have 30
 493	 * 16 bit words available in our HW msg buffer (minus 1 for the
 494	 * msg type).  That's 30 hash values if we pack 'em right.  If
 495	 * there are more than 30 MC addresses to add then punt the
 496	 * extras for now and then add code to handle more than 30 later.
  497	 * It would be unusual for a server to request that many multicast
  498	 * addresses except in large enterprise network environments.
 499	 */
 500
 501	cnt = netdev_mc_count(netdev);
 502	if (cnt > 30)
 503		cnt = 30;
 504	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
 505	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
 506
 507	i = 0;
 508	netdev_for_each_mc_addr(ha, netdev) {
 509		if (i == cnt)
 510			break;
 511		if (is_link_local_ether_addr(ha->addr))
 512			continue;
 513
 514		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
 515	}
 516
 517	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 518			IXGBE_VFMAILBOX_SIZE);
 519}
 520
 521/**
 522 * ixgbevf_hv_update_mc_addr_list_vf - stub
 523 * @hw: unused
 524 * @netdev: unused
 525 *
 526 * Hyper-V variant - just a stub.
 527 */
 528static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 529					     struct net_device *netdev)
 530{
 531	return -EOPNOTSUPP;
 532}
 533
 534/**
 535 *  ixgbevf_update_xcast_mode - Update Multicast mode
 536 *  @hw: pointer to the HW structure
 537 *  @xcast_mode: new multicast mode
 538 *
 539 *  Updates the Multicast Mode of VF.
 540 **/
 541static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 542{
 543	u32 msgbuf[2];
 544	s32 err;
 545
 546	switch (hw->api_version) {
 547	case ixgbe_mbox_api_12:
 548		/* promisc introduced in 1.3 version */
 549		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
 550			return -EOPNOTSUPP;
 551		fallthrough;
 552	case ixgbe_mbox_api_13:
 553	case ixgbe_mbox_api_14:
 554	case ixgbe_mbox_api_15:
 555		break;
 556	default:
 557		return -EOPNOTSUPP;
 558	}
 559
 560	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
 561	msgbuf[1] = xcast_mode;
 562
 563	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 564					 ARRAY_SIZE(msgbuf));
 565	if (err)
 566		return err;
 567
 568	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 569	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
 570		return -EPERM;
 571
 572	return 0;
 573}
 574
 575/**
 576 * ixgbevf_hv_update_xcast_mode - stub
 577 * @hw: unused
 578 * @xcast_mode: unused
 579 *
 580 * Hyper-V variant - just a stub.
 581 */
 582static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 583{
 584	return -EOPNOTSUPP;
 585}
 586
 587/**
 588 * ixgbevf_get_link_state_vf - Get VF link state from PF
 589 * @hw: pointer to the HW structure
 590 * @link_state: link state storage
 591 *
 592 * Returns state of the operation error or success.
 593 */
 594static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
 595{
 596	u32 msgbuf[2];
 597	s32 ret_val;
 598	s32 err;
 599
 600	msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
 601	msgbuf[1] = 0x0;
 602
 603	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
 604
 605	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
 606		ret_val = IXGBE_ERR_MBX;
 607	} else {
 608		ret_val = 0;
 609		*link_state = msgbuf[1];
 610	}
 611
 612	return ret_val;
 613}
 614
 615/**
  616 * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub.
 617 * @hw: unused
 618 * @link_state: unused
 619 *
 620 * Hyper-V variant; there is no mailbox communication.
 621 */
 622static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
 623{
 624	return -EOPNOTSUPP;
 625}
 626
 627/**
 628 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 629 *  @hw: pointer to the HW structure
 630 *  @vlan: 12 bit VLAN ID
 631 *  @vind: unused by VF drivers
 632 *  @vlan_on: if true then set bit, else clear bit
 633 **/
 634static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 635			       bool vlan_on)
 636{
 637	u32 msgbuf[2];
 638	s32 err;
 639
 640	msgbuf[0] = IXGBE_VF_SET_VLAN;
 641	msgbuf[1] = vlan;
 642	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
 643	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 644
 645	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 646					 ARRAY_SIZE(msgbuf));
 647	if (err)
 648		goto mbx_err;
 649
 650	/* remove extra bits from the message */
 651	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 652	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
 653
 654	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
 655		err = IXGBE_ERR_INVALID_ARGUMENT;
 656
 657mbx_err:
 658	return err;
 659}
 660
 661/**
  662 * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
 663 * @hw: unused
 664 * @vlan: unused
 665 * @vind: unused
 666 * @vlan_on: unused
 667 */
 668static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 669				  bool vlan_on)
 670{
 671	return -EOPNOTSUPP;
 672}
 673
 674/**
 675 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 676 *  @hw: pointer to hardware structure
 677 *  @speed: Unused in this implementation
 678 *  @autoneg: Unused in this implementation
 679 *  @autoneg_wait_to_complete: Unused in this implementation
 680 *
 681 *  Do nothing and return success.  VF drivers are not allowed to change
 682 *  global settings.  Maintained for driver compatibility.
 683 **/
 684static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
 685				     ixgbe_link_speed speed, bool autoneg,
 686				     bool autoneg_wait_to_complete)
 687{
 688	return 0;
 689}
 690
 691/**
 692 *  ixgbevf_check_mac_link_vf - Get link/speed status
 693 *  @hw: pointer to hardware structure
 694 *  @speed: pointer to link speed
  695 *  @link_up: true if link is up, false otherwise
 696 *  @autoneg_wait_to_complete: unused
 697 *
 698 *  Reads the links register to determine if link is up and the current speed
 699 **/
 700static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 701				     ixgbe_link_speed *speed,
 702				     bool *link_up,
 703				     bool autoneg_wait_to_complete)
 704{
 705	struct ixgbe_mbx_info *mbx = &hw->mbx;
 706	struct ixgbe_mac_info *mac = &hw->mac;
 707	s32 ret_val = 0;
 708	u32 links_reg;
 709	u32 in_msg = 0;
 710
 711	/* If we were hit with a reset drop the link */
 712	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 713		mac->get_link_status = true;
 714
 715	if (!mac->get_link_status)
 716		goto out;
 717
 718	/* if link status is down no point in checking to see if pf is up */
 719	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 720	if (!(links_reg & IXGBE_LINKS_UP))
 721		goto out;
 722
 723	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 724	 * before the link status is correct
 725	 */
 726	if (mac->type == ixgbe_mac_82599_vf) {
 727		int i;
 728
 729		for (i = 0; i < 5; i++) {
 730			udelay(100);
 731			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 732
 733			if (!(links_reg & IXGBE_LINKS_UP))
 734				goto out;
 735		}
 736	}
 737
 738	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 739	case IXGBE_LINKS_SPEED_10G_82599:
 740		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 741		break;
 742	case IXGBE_LINKS_SPEED_1G_82599:
 743		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 744		break;
 745	case IXGBE_LINKS_SPEED_100_82599:
 746		*speed = IXGBE_LINK_SPEED_100_FULL;
 747		break;
 748	}
 749
 750	/* if the read failed it could just be a mailbox collision, best wait
 751	 * until we are called again and don't report an error
 752	 */
 753	if (mbx->ops.read(hw, &in_msg, 1)) {
 754		if (hw->api_version >= ixgbe_mbox_api_15)
 755			mac->get_link_status = false;
 756		goto out;
 757	}
 758
 759	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
  760		/* msg is not CTS; if it is a NACK we must have lost CTS status */
 761		if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
 762			ret_val = -1;
 763		goto out;
 764	}
 765
 766	/* the pf is talking, if we timed out in the past we reinit */
 767	if (!mbx->timeout) {
 768		ret_val = -1;
 769		goto out;
 770	}
 771
 772	/* if we passed all the tests above then the link is up and we no
 773	 * longer need to check for link
 774	 */
 775	mac->get_link_status = false;
 776
 777out:
 778	*link_up = !mac->get_link_status;
 779	return ret_val;
 780}
 781
 782/**
 783 * ixgbevf_hv_check_mac_link_vf - check link
 784 * @hw: pointer to private hardware struct
 785 * @speed: pointer to link speed
  786 * @link_up: true if link is up, false otherwise
 787 * @autoneg_wait_to_complete: unused
 788 *
 789 * Hyper-V variant; there is no mailbox communication.
 790 */
 791static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
 792					ixgbe_link_speed *speed,
 793					bool *link_up,
 794					bool autoneg_wait_to_complete)
 795{
 796	struct ixgbe_mbx_info *mbx = &hw->mbx;
 797	struct ixgbe_mac_info *mac = &hw->mac;
 798	u32 links_reg;
 799
 800	/* If we were hit with a reset drop the link */
 801	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 802		mac->get_link_status = true;
 803
 804	if (!mac->get_link_status)
 805		goto out;
 806
 807	/* if link status is down no point in checking to see if pf is up */
 808	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 809	if (!(links_reg & IXGBE_LINKS_UP))
 810		goto out;
 811
 812	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 813	 * before the link status is correct
 814	 */
 815	if (mac->type == ixgbe_mac_82599_vf) {
 816		int i;
 817
 818		for (i = 0; i < 5; i++) {
 819			udelay(100);
 820			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 821
 822			if (!(links_reg & IXGBE_LINKS_UP))
 823				goto out;
 824		}
 825	}
 826
 827	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 828	case IXGBE_LINKS_SPEED_10G_82599:
 829		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 830		break;
 831	case IXGBE_LINKS_SPEED_1G_82599:
 832		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 833		break;
 834	case IXGBE_LINKS_SPEED_100_82599:
 835		*speed = IXGBE_LINK_SPEED_100_FULL;
 836		break;
 837	}
 838
 839	/* if we passed all the tests above then the link is up and we no
 840	 * longer need to check for link
 841	 */
 842	mac->get_link_status = false;
 843
 844out:
 845	*link_up = !mac->get_link_status;
 846	return 0;
 847}
 848
 849/**
 850 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 851 *  @hw: pointer to the HW structure
 852 *  @max_size: value to assign to max frame size
 853 **/
 854static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 855{
 856	u32 msgbuf[2];
 857	s32 ret_val;
 858
 859	msgbuf[0] = IXGBE_VF_SET_LPE;
 860	msgbuf[1] = max_size;
 861
 862	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 863					     ARRAY_SIZE(msgbuf));
 864	if (ret_val)
 865		return ret_val;
 866	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
 867	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
 868		return IXGBE_ERR_MBX;
 869
 870	return 0;
 871}
 872
 873/**
 874 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 875 * @hw: pointer to the HW structure
 876 * @max_size: value to assign to max frame size
 877 * Hyper-V variant.
 878 **/
 879static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 880{
 881	u32 reg;
 882
 883	/* If we are on Hyper-V, we implement this functionality
 884	 * differently.
 885	 */
 886	reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
 887	/* CRC == 4 */
 888	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
 889	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
 890
 891	return 0;
 892}
 893
 894/**
 895 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 896 *  @hw: pointer to the HW structure
 897 *  @api: integer containing requested API version
 898 **/
 899static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 900{
 901	int err;
 902	u32 msg[3];
 903
 904	/* Negotiate the mailbox API version */
 905	msg[0] = IXGBE_VF_API_NEGOTIATE;
 906	msg[1] = api;
 907	msg[2] = 0;
 908
 909	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 910	if (!err) {
 911		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 912
 913		/* Store value and return 0 on success */
 914		if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
 915			      IXGBE_VT_MSGTYPE_SUCCESS)) {
 916			hw->api_version = api;
 917			return 0;
 918		}
 919
 920		err = IXGBE_ERR_INVALID_ARGUMENT;
 921	}
 922
 923	return err;
 924}
 925
 926/**
 927 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 928 *  @hw: pointer to the HW structure
 929 *  @api: integer containing requested API version
 930 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 931 **/
 932static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 933{
 934	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
 935	if (api != ixgbe_mbox_api_10)
 936		return IXGBE_ERR_INVALID_ARGUMENT;
 937
 938	return 0;
 939}
 940
 941int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 942		       unsigned int *default_tc)
 943{
 944	int err;
 945	u32 msg[5];
 946
 947	/* do nothing if API doesn't support ixgbevf_get_queues */
 948	switch (hw->api_version) {
 949	case ixgbe_mbox_api_11:
 950	case ixgbe_mbox_api_12:
 951	case ixgbe_mbox_api_13:
 952	case ixgbe_mbox_api_14:
 953	case ixgbe_mbox_api_15:
 954		break;
 955	default:
 956		return 0;
 957	}
 958
 959	/* Fetch queue configuration from the PF */
 960	msg[0] = IXGBE_VF_GET_QUEUE;
 961	msg[1] = msg[2] = msg[3] = msg[4] = 0;
 962
 963	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 964	if (!err) {
 965		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 966
 967		/* if we didn't get an ACK there must have been
 968		 * some sort of mailbox error so we should treat it
 969		 * as such
 970		 */
 971		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
 972			return IXGBE_ERR_MBX;
 973
 974		/* record and validate values from message */
 975		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
 976		if (hw->mac.max_tx_queues == 0 ||
 977		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
 978			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
 979
 980		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
 981		if (hw->mac.max_rx_queues == 0 ||
 982		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
 983			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
 984
 985		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
 986		/* in case of unknown state assume we cannot tag frames */
 987		if (*num_tcs > hw->mac.max_rx_queues)
 988			*num_tcs = 1;
 989
 990		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
 991		/* default to queue 0 on out-of-bounds queue number */
 992		if (*default_tc >= hw->mac.max_tx_queues)
 993			*default_tc = 0;
 994	}
 995
 996	return err;
 997}
 998
 999static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
1000	.init_hw		= ixgbevf_init_hw_vf,
1001	.reset_hw		= ixgbevf_reset_hw_vf,
1002	.start_hw		= ixgbevf_start_hw_vf,
1003	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
1004	.stop_adapter		= ixgbevf_stop_hw_vf,
1005	.setup_link		= ixgbevf_setup_mac_link_vf,
1006	.check_link		= ixgbevf_check_mac_link_vf,
1007	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
1008	.set_rar		= ixgbevf_set_rar_vf,
1009	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
1010	.update_xcast_mode	= ixgbevf_update_xcast_mode,
1011	.get_link_state		= ixgbevf_get_link_state_vf,
1012	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
1013	.set_vfta		= ixgbevf_set_vfta_vf,
1014	.set_rlpml		= ixgbevf_set_rlpml_vf,
1015};
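/* Consumption sketch (an assumption about the core driver, not code from
 * this file): the ixgbevf probe path is expected to pick one of the
 * ixgbevf_info entries below, copy its mac_ops into hw->mac.ops and then
 * dispatch through it, e.g.:
 *
 *	hw->mac.ops.reset_hw(hw);
 *	hw->mac.ops.negotiate_api_version(hw, ixgbe_mbox_api_15);
 *	hw->mac.ops.init_hw(hw);
 *
 * The ixgbevf_hv_mac_ops table below swaps in the config-space and stub
 * variants so the same call sites also work on Hyper-V, where there is no
 * PF mailbox.
 */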
1016
1017static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
1018	.init_hw		= ixgbevf_init_hw_vf,
1019	.reset_hw		= ixgbevf_hv_reset_hw_vf,
1020	.start_hw		= ixgbevf_start_hw_vf,
1021	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
1022	.stop_adapter		= ixgbevf_stop_hw_vf,
1023	.setup_link		= ixgbevf_setup_mac_link_vf,
1024	.check_link		= ixgbevf_hv_check_mac_link_vf,
1025	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
1026	.set_rar		= ixgbevf_hv_set_rar_vf,
1027	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
1028	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
1029	.get_link_state		= ixgbevf_hv_get_link_state_vf,
1030	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
1031	.set_vfta		= ixgbevf_hv_set_vfta_vf,
1032	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
1033};
1034
1035const struct ixgbevf_info ixgbevf_82599_vf_info = {
1036	.mac = ixgbe_mac_82599_vf,
1037	.mac_ops = &ixgbevf_mac_ops,
1038};
1039
1040const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
1041	.mac = ixgbe_mac_82599_vf,
1042	.mac_ops = &ixgbevf_hv_mac_ops,
1043};
1044
1045const struct ixgbevf_info ixgbevf_X540_vf_info = {
1046	.mac = ixgbe_mac_X540_vf,
1047	.mac_ops = &ixgbevf_mac_ops,
1048};
1049
1050const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
1051	.mac = ixgbe_mac_X540_vf,
1052	.mac_ops = &ixgbevf_hv_mac_ops,
1053};
1054
1055const struct ixgbevf_info ixgbevf_X550_vf_info = {
1056	.mac = ixgbe_mac_X550_vf,
1057	.mac_ops = &ixgbevf_mac_ops,
1058};
1059
1060const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
1061	.mac = ixgbe_mac_X550_vf,
1062	.mac_ops = &ixgbevf_hv_mac_ops,
1063};
1064
1065const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1066	.mac = ixgbe_mac_X550EM_x_vf,
1067	.mac_ops = &ixgbevf_mac_ops,
1068};
1069
1070const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1071	.mac = ixgbe_mac_X550EM_x_vf,
1072	.mac_ops = &ixgbevf_hv_mac_ops,
1073};
1074
1075const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
1076	.mac = ixgbe_mac_x550em_a_vf,
1077	.mac_ops = &ixgbevf_mac_ops,
1078};
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#include "vf.h"
   5#include "ixgbevf.h"
   6
   7/* On Hyper-V, to reset, we need to read from this offset
   8 * from the PCI config space. This is the mechanism used on
   9 * Hyper-V to support PF/VF communication.
  10 */
  11#define IXGBE_HV_RESET_OFFSET           0x201
  12
  13static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
  14					     u32 *retmsg, u16 size)
  15{
  16	struct ixgbe_mbx_info *mbx = &hw->mbx;
  17	s32 retval = mbx->ops.write_posted(hw, msg, size);
  18
  19	if (retval)
  20		return retval;
  21
  22	return mbx->ops.read_posted(hw, retmsg, size);
  23}
  24
  25/**
  26 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  27 *  @hw: pointer to hardware structure
  28 *
  29 *  Starts the hardware by filling the bus info structure and media type, clears
  30 *  all on chip counters, initializes receive address registers, multicast
  31 *  table, VLAN filter table, calls routine to set up link and flow control
  32 *  settings, and leaves transmit and receive units disabled and uninitialized
  33 **/
  34static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
  35{
  36	/* Clear adapter stopped flag */
  37	hw->adapter_stopped = false;
  38
  39	return 0;
  40}
  41
  42/**
  43 *  ixgbevf_init_hw_vf - virtual function hardware initialization
  44 *  @hw: pointer to hardware structure
  45 *
  46 *  Initialize the hardware by resetting the hardware and then starting
  47 *  the hardware
  48 **/
  49static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  50{
  51	s32 status = hw->mac.ops.start_hw(hw);
  52
  53	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
  54
  55	return status;
  56}
  57
  58/**
  59 *  ixgbevf_reset_hw_vf - Performs hardware reset
  60 *  @hw: pointer to hardware structure
  61 *
  62 *  Resets the hardware by resetting the transmit and receive units, masks and
  63 *  clears all interrupts.
  64 **/
  65static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
  66{
  67	struct ixgbe_mbx_info *mbx = &hw->mbx;
  68	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
  69	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
  70	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
  71	u8 *addr = (u8 *)(&msgbuf[1]);
 
  72
  73	/* Call adapter stop to disable tx/rx and clear interrupts */
  74	hw->mac.ops.stop_adapter(hw);
  75
  76	/* reset the api version */
  77	hw->api_version = ixgbe_mbox_api_10;
 
 
 
  78
  79	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
  80	IXGBE_WRITE_FLUSH(hw);
  81
  82	/* we cannot reset while the RSTI / RSTD bits are asserted */
  83	while (!mbx->ops.check_for_rst(hw) && timeout) {
  84		timeout--;
  85		udelay(5);
  86	}
  87
  88	if (!timeout)
  89		return IXGBE_ERR_RESET_FAILED;
  90
  91	/* mailbox timeout can now become active */
  92	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
  93
  94	msgbuf[0] = IXGBE_VF_RESET;
  95	mbx->ops.write_posted(hw, msgbuf, 1);
  96
  97	mdelay(10);
  98
  99	/* set our "perm_addr" based on info provided by PF
 100	 * also set up the mc_filter_type which is piggy backed
 101	 * on the mac address in word 3
 102	 */
 103	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
 104	if (ret_val)
 105		return ret_val;
 106
 107	/* New versions of the PF may NACK the reset return message
 108	 * to indicate that no MAC address has yet been assigned for
 109	 * the VF.
 110	 */
 111	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
 112	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
 113		return IXGBE_ERR_INVALID_MAC_ADDR;
 114
 115	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
 116		ether_addr_copy(hw->mac.perm_addr, addr);
 117
 118	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 119
 120	return 0;
 121}
 122
 123/**
 
 
 
 124 * Hyper-V variant; the VF/PF communication is through the PCI
 125 * config space.
 126 * @hw: pointer to private hardware struct
 127 */
 128static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 129{
 130#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
 131	struct ixgbevf_adapter *adapter = hw->back;
 132	int i;
 133
 134	for (i = 0; i < 6; i++)
 135		pci_read_config_byte(adapter->pdev,
 136				     (i + IXGBE_HV_RESET_OFFSET),
 137				     &hw->mac.perm_addr[i]);
 138	return 0;
 139#else
 140	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
 141	return -EOPNOTSUPP;
 142#endif
 143}
 144
 145/**
 146 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 147 *  @hw: pointer to hardware structure
 148 *
 149 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 150 *  disables transmit and receive units. The adapter_stopped flag is used by
 151 *  the shared code and drivers to determine if the adapter is in a stopped
 152 *  state and should not touch the hardware.
 153 **/
 154static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
 155{
 156	u32 number_of_queues;
 157	u32 reg_val;
 158	u16 i;
 159
 160	/* Set the adapter_stopped flag so other driver functions stop touching
 161	 * the hardware
 162	 */
 163	hw->adapter_stopped = true;
 164
 165	/* Disable the receive unit by stopped each queue */
 166	number_of_queues = hw->mac.max_rx_queues;
 167	for (i = 0; i < number_of_queues; i++) {
 168		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 169		if (reg_val & IXGBE_RXDCTL_ENABLE) {
 170			reg_val &= ~IXGBE_RXDCTL_ENABLE;
 171			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
 172		}
 173	}
 174
 175	IXGBE_WRITE_FLUSH(hw);
 176
 177	/* Clear interrupt mask to stop from interrupts being generated */
 178	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
 179
 180	/* Clear any pending interrupts */
 181	IXGBE_READ_REG(hw, IXGBE_VTEICR);
 182
 183	/* Disable the transmit unit.  Each queue must be disabled. */
 184	number_of_queues = hw->mac.max_tx_queues;
 185	for (i = 0; i < number_of_queues; i++) {
 186		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 187		if (reg_val & IXGBE_TXDCTL_ENABLE) {
 188			reg_val &= ~IXGBE_TXDCTL_ENABLE;
 189			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
 190		}
 191	}
 192
 193	return 0;
 194}
 195
 196/**
 197 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 198 *  @hw: pointer to hardware structure
 199 *  @mc_addr: the multicast address
 200 *
 201 *  Extracts the 12 bits, from a multicast address, to determine which
 202 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 203 *  incoming Rx multicast addresses, to determine the bit-vector to check in
 204 *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 205 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 206 *  to mc_filter_type.
 207 **/
 208static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 209{
 210	u32 vector = 0;
 211
 212	switch (hw->mac.mc_filter_type) {
 213	case 0:   /* use bits [47:36] of the address */
 214		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 215		break;
 216	case 1:   /* use bits [46:35] of the address */
 217		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 218		break;
 219	case 2:   /* use bits [45:34] of the address */
 220		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 221		break;
 222	case 3:   /* use bits [43:32] of the address */
 223		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 224		break;
 225	default:  /* Invalid mc_filter_type */
 226		break;
 227	}
 228
 229	/* vector can only be 12-bits or boundary will be exceeded */
 230	vector &= 0xFFF;
 231	return vector;
 232}
 233
 234/**
 235 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 236 *  @hw: pointer to the HW structure
 237 *  @mac_addr: pointer to storage for retrieved MAC address
 238 **/
 239static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 240{
 241	ether_addr_copy(mac_addr, hw->mac.perm_addr);
 242
 243	return 0;
 244}
 245
 246static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 247{
 248	u32 msgbuf[3], msgbuf_chk;
 249	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 250	s32 ret_val;
 251
 252	memset(msgbuf, 0, sizeof(msgbuf));
 253	/* If index is one then this is the start of a new list and needs
 254	 * indication to the PF so it can do it's own list management.
 255	 * If it is zero then that tells the PF to just clear all of
 256	 * this VF's macvlans and there is no new list.
 257	 */
 258	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
 259	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
 260	msgbuf_chk = msgbuf[0];
 261
 262	if (addr)
 263		ether_addr_copy(msg_addr, addr);
 264
 265	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 266					     ARRAY_SIZE(msgbuf));
 267	if (!ret_val) {
 268		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 269
 270		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
 271			return -ENOMEM;
 272	}
 273
 274	return ret_val;
 275}
 276
 277static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 278{
 279	return -EOPNOTSUPP;
 280}
 281
 282/**
 283 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 284 * @hw: pointer to hardware structure
 285 * @reta: buffer to fill with RETA contents.
 286 * @num_rx_queues: Number of Rx queues configured for this port
 287 *
 288 * The "reta" buffer should be big enough to contain 32 registers.
 289 *
 290 * Returns: 0 on success.
 291 *          if API doesn't support this operation - (-EOPNOTSUPP).
 292 */
 293int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
 294{
 295	int err, i, j;
 296	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 297	u32 *hw_reta = &msgbuf[1];
 298	u32 mask = 0;
 299
 300	/* We have to use a mailbox for 82599 and x540 devices only.
 301	 * For these devices RETA has 128 entries.
 302	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
 303	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
 304	 */
 305	int dwords = IXGBEVF_82599_RETA_SIZE / 16;
 306
 307	/* We support the RSS querying for 82599 and x540 devices only.
 308	 * Thus return an error if API doesn't support RETA querying or querying
 309	 * is not supported for this device type.
 310	 */
 311	switch (hw->api_version) {
 
 312	case ixgbe_mbox_api_14:
 313	case ixgbe_mbox_api_13:
 314	case ixgbe_mbox_api_12:
 315		if (hw->mac.type < ixgbe_mac_X550_vf)
 316			break;
 317		/* fall through */
 318	default:
 319		return -EOPNOTSUPP;
 320	}
 321
 322	msgbuf[0] = IXGBE_VF_GET_RETA;
 323
 324	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
 325
 326	if (err)
 327		return err;
 328
 329	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
 330
 331	if (err)
 332		return err;
 333
 334	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 335
 336	/* If the operation has been refused by a PF return -EPERM */
 337	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
 338		return -EPERM;
 339
 340	/* If we didn't get an ACK there must have been
 341	 * some sort of mailbox error so we should treat it
 342	 * as such.
 343	 */
 344	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
 345		return IXGBE_ERR_MBX;
 346
 347	/* ixgbevf doesn't support more than 2 queues at the moment */
 348	if (num_rx_queues > 1)
 349		mask = 0x1;
 350
 351	for (i = 0; i < dwords; i++)
 352		for (j = 0; j < 16; j++)
 353			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
 354
 355	return 0;
 356}
 357
 358/**
 359 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 360 * @hw: pointer to the HW structure
 361 * @rss_key: buffer to fill with RSS Hash Key contents.
 362 *
 363 * The "rss_key" buffer should be big enough to contain 10 registers.
 364 *
 365 * Returns: 0 on success.
 366 *          if API doesn't support this operation - (-EOPNOTSUPP).
 367 */
 368int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 369{
 370	int err;
 371	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 372
 373	/* We currently support the RSS Random Key retrieval for 82599 and x540
 374	 * devices only.
 375	 *
 376	 * Thus return an error if API doesn't support RSS Random Key retrieval
 377	 * or if the operation is not supported for this device type.
 378	 */
 379	switch (hw->api_version) {
 
 380	case ixgbe_mbox_api_14:
 381	case ixgbe_mbox_api_13:
 382	case ixgbe_mbox_api_12:
 383		if (hw->mac.type < ixgbe_mac_X550_vf)
 384			break;
 385		/* fall through */
 386	default:
 387		return -EOPNOTSUPP;
 388	}
 389
 390	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
 391	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
 392
 393	if (err)
 394		return err;
 395
 396	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
 397
 398	if (err)
 399		return err;
 400
 401	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 402
 403	/* If the operation has been refused by a PF return -EPERM */
 404	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
 405		return -EPERM;
 406
 407	/* If we didn't get an ACK there must have been
 408	 * some sort of mailbox error so we should treat it
 409	 * as such.
 410	 */
 411	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
 412		return IXGBE_ERR_MBX;
 413
 414	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
 415
 416	return 0;
 417}
 418
 419/**
 420 *  ixgbevf_set_rar_vf - set device MAC address
 421 *  @hw: pointer to hardware structure
 422 *  @index: Receive address register to write
 423 *  @addr: Address to put into receive address register
 424 *  @vmdq: Unused in this implementation
 425 **/
 426static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 427			      u32 vmdq)
 428{
 429	u32 msgbuf[3];
 430	u8 *msg_addr = (u8 *)(&msgbuf[1]);
 431	s32 ret_val;
 432
 433	memset(msgbuf, 0, sizeof(msgbuf));
 434	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
 435	ether_addr_copy(msg_addr, addr);
 436
 437	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 438					     ARRAY_SIZE(msgbuf));
 439	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 440
 441	/* if nacked the address was rejected, use "perm_addr" */
 442	if (!ret_val &&
 443	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
 444		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
 445		return IXGBE_ERR_MBX;
 446	}
 447
 448	return ret_val;
 449}
 450
 451/**
 452 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 453 *  @hw: pointer to hardware structure
 454 *  @index: Receive address register to write
 455 *  @addr: Address to put into receive address register
 456 *  @vmdq: Unused in this implementation
 457 *
 458 * We don't really allow setting the device MAC address. However,
 459 * if the address being set is the permanent MAC address we will
 460 * permit that.
 461 **/
 462static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 463				 u32 vmdq)
 464{
 465	if (ether_addr_equal(addr, hw->mac.perm_addr))
 466		return 0;
 467
 468	return -EOPNOTSUPP;
 469}
 470
 471/**
 472 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 473 *  @hw: pointer to the HW structure
 474 *  @netdev: pointer to net device structure
 475 *
 476 *  Updates the Multicast Table Array.
 477 **/
 478static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 479					  struct net_device *netdev)
 480{
 481	struct netdev_hw_addr *ha;
 482	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 483	u16 *vector_list = (u16 *)&msgbuf[1];
 484	u32 cnt, i;
 485
 486	/* Each entry in the list uses 1 16 bit word.  We have 30
 487	 * 16 bit words available in our HW msg buffer (minus 1 for the
 488	 * msg type).  That's 30 hash values if we pack 'em right.  If
 489	 * there are more than 30 MC addresses to add then punt the
 490	 * extras for now and then add code to handle more than 30 later.
 491	 * It would be unusual for a server to request that many multi-cast
 492	 * addresses except for in large enterprise network environments.
 493	 */
 494
 495	cnt = netdev_mc_count(netdev);
 496	if (cnt > 30)
 497		cnt = 30;
 498	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
 499	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
 500
 501	i = 0;
 502	netdev_for_each_mc_addr(ha, netdev) {
 503		if (i == cnt)
 504			break;
 505		if (is_link_local_ether_addr(ha->addr))
 506			continue;
 507
 508		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
 509	}
 510
 511	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 512			IXGBE_VFMAILBOX_SIZE);
 513}
 514
 515/**
 516 * Hyper-V variant - just a stub.
 517 * @hw: unused
 518 * @netdev: unused
 
 
 519 */
 520static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 521					     struct net_device *netdev)
 522{
 523	return -EOPNOTSUPP;
 524}
 525
 526/**
 527 *  ixgbevf_update_xcast_mode - Update Multicast mode
 528 *  @hw: pointer to the HW structure
 529 *  @xcast_mode: new multicast mode
 530 *
 531 *  Updates the Multicast Mode of VF.
 532 **/
 533static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 534{
 535	u32 msgbuf[2];
 536	s32 err;
 537
 538	switch (hw->api_version) {
 539	case ixgbe_mbox_api_12:
 540		/* promisc introduced in 1.3 version */
 541		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
 542			return -EOPNOTSUPP;
 543		/* Fall threw */
 
 544	case ixgbe_mbox_api_14:
 545	case ixgbe_mbox_api_13:
 546		break;
 547	default:
 548		return -EOPNOTSUPP;
 549	}
 550
 551	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
 552	msgbuf[1] = xcast_mode;
 553
 554	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 555					 ARRAY_SIZE(msgbuf));
 556	if (err)
 557		return err;
 558
 559	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 560	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
 561		return -EPERM;
 562
 563	return 0;
 564}
 565
 566/**
 567 * Hyper-V variant - just a stub.
 568 * @hw: unused
 569 * @xcast_mode: unused
 
 
 570 */
 571static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 572{
 573	return -EOPNOTSUPP;
 574}
 575
 576/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 577 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 578 *  @hw: pointer to the HW structure
 579 *  @vlan: 12 bit VLAN ID
 580 *  @vind: unused by VF drivers
 581 *  @vlan_on: if true then set bit, else clear bit
 582 **/
 583static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 584			       bool vlan_on)
 585{
 586	u32 msgbuf[2];
 587	s32 err;
 588
 589	msgbuf[0] = IXGBE_VF_SET_VLAN;
 590	msgbuf[1] = vlan;
 591	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
 592	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 593
 594	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 595					 ARRAY_SIZE(msgbuf));
 596	if (err)
 597		goto mbx_err;
 598
 599	/* remove extra bits from the message */
 600	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 601	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
 602
 603	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
 604		err = IXGBE_ERR_INVALID_ARGUMENT;
 605
 606mbx_err:
 607	return err;
 608}
 609
 610/**
 611 * Hyper-V variant - just a stub.
 612 * @hw: unused
 613 * @vlan: unused
 614 * @vind: unused
 615 * @vlan_on: unused
 616 */
 617static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 618				  bool vlan_on)
 619{
 620	return -EOPNOTSUPP;
 621}
 622
 623/**
 624 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 625 *  @hw: pointer to hardware structure
 626 *  @speed: Unused in this implementation
 627 *  @autoneg: Unused in this implementation
 628 *  @autoneg_wait_to_complete: Unused in this implementation
 629 *
 630 *  Do nothing and return success.  VF drivers are not allowed to change
 631 *  global settings.  Maintained for driver compatibility.
 632 **/
 633static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
 634				     ixgbe_link_speed speed, bool autoneg,
 635				     bool autoneg_wait_to_complete)
 636{
 637	return 0;
 638}
 639
 640/**
 641 *  ixgbevf_check_mac_link_vf - Get link/speed status
 642 *  @hw: pointer to hardware structure
 643 *  @speed: pointer to link speed
 644 *  @link_up: true is link is up, false otherwise
 645 *  @autoneg_wait_to_complete: unused
 646 *
 647 *  Reads the links register to determine if link is up and the current speed
 648 **/
 649static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 650				     ixgbe_link_speed *speed,
 651				     bool *link_up,
 652				     bool autoneg_wait_to_complete)
 653{
 654	struct ixgbe_mbx_info *mbx = &hw->mbx;
 655	struct ixgbe_mac_info *mac = &hw->mac;
 656	s32 ret_val = 0;
 657	u32 links_reg;
 658	u32 in_msg = 0;
 659
 660	/* If we were hit with a reset drop the link */
 661	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 662		mac->get_link_status = true;
 663
 664	if (!mac->get_link_status)
 665		goto out;
 666
 667	/* if link status is down no point in checking to see if pf is up */
 668	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 669	if (!(links_reg & IXGBE_LINKS_UP))
 670		goto out;
 671
 672	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 673	 * before the link status is correct
 674	 */
 675	if (mac->type == ixgbe_mac_82599_vf) {
 676		int i;
 677
 678		for (i = 0; i < 5; i++) {
 679			udelay(100);
 680			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 681
 682			if (!(links_reg & IXGBE_LINKS_UP))
 683				goto out;
 684		}
 685	}
 686
 687	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 688	case IXGBE_LINKS_SPEED_10G_82599:
 689		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 690		break;
 691	case IXGBE_LINKS_SPEED_1G_82599:
 692		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 693		break;
 694	case IXGBE_LINKS_SPEED_100_82599:
 695		*speed = IXGBE_LINK_SPEED_100_FULL;
 696		break;
 697	}
 698
 699	/* if the read failed it could just be a mailbox collision, best wait
 700	 * until we are called again and don't report an error
 701	 */
 702	if (mbx->ops.read(hw, &in_msg, 1))
 
 
 703		goto out;
 
 704
 705	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
 706		/* msg is not CTS and is NACK we must have lost CTS status */
 707		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
 708			ret_val = -1;
 709		goto out;
 710	}
 711
 712	/* the pf is talking, if we timed out in the past we reinit */
 713	if (!mbx->timeout) {
 714		ret_val = -1;
 715		goto out;
 716	}
 717
 718	/* if we passed all the tests above then the link is up and we no
 719	 * longer need to check for link
 720	 */
 721	mac->get_link_status = false;
 722
 723out:
 724	*link_up = !mac->get_link_status;
 725	return ret_val;
 726}
 727
 728/**
 729 * Hyper-V variant; there is no mailbox communication.
 730 * @hw: pointer to private hardware struct
 731 * @speed: pointer to link speed
 732 * @link_up: true is link is up, false otherwise
 733 * @autoneg_wait_to_complete: unused
 
 
 734 */
 735static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
 736					ixgbe_link_speed *speed,
 737					bool *link_up,
 738					bool autoneg_wait_to_complete)
 739{
 740	struct ixgbe_mbx_info *mbx = &hw->mbx;
 741	struct ixgbe_mac_info *mac = &hw->mac;
 742	u32 links_reg;
 743
 744	/* If we were hit with a reset drop the link */
 745	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 746		mac->get_link_status = true;
 747
 748	if (!mac->get_link_status)
 749		goto out;
 750
 751	/* if link status is down no point in checking to see if pf is up */
 752	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 753	if (!(links_reg & IXGBE_LINKS_UP))
 754		goto out;
 755
 756	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 757	 * before the link status is correct
 758	 */
 759	if (mac->type == ixgbe_mac_82599_vf) {
 760		int i;
 761
 762		for (i = 0; i < 5; i++) {
 763			udelay(100);
 764			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 765
 766			if (!(links_reg & IXGBE_LINKS_UP))
 767				goto out;
 768		}
 769	}
 770
 771	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 772	case IXGBE_LINKS_SPEED_10G_82599:
 773		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 774		break;
 775	case IXGBE_LINKS_SPEED_1G_82599:
 776		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 777		break;
 778	case IXGBE_LINKS_SPEED_100_82599:
 779		*speed = IXGBE_LINK_SPEED_100_FULL;
 780		break;
 781	}
 782
 783	/* if we passed all the tests above then the link is up and we no
 784	 * longer need to check for link
 785	 */
 786	mac->get_link_status = false;
 787
 788out:
 789	*link_up = !mac->get_link_status;
 790	return 0;
 791}
 792
 793/**
 794 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 795 *  @hw: pointer to the HW structure
 796 *  @max_size: value to assign to max frame size
 797 **/
 798static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 799{
 800	u32 msgbuf[2];
 801	s32 ret_val;
 802
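	/* Mailbox request layout: word 0 carries the IXGBE_VF_SET_LPE opcode,
	 * word 1 the requested maximum frame size.  The PF echoes the opcode
	 * in its reply, with an error status bit set if the request was
	 * rejected.
	 */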
 803	msgbuf[0] = IXGBE_VF_SET_LPE;
 804	msgbuf[1] = max_size;
 805
 806	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 807					     ARRAY_SIZE(msgbuf));
 808	if (ret_val)
 809		return ret_val;
 810	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
 811	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
 812		return IXGBE_ERR_MBX;
 813
 814	return 0;
 815}
 816
 817/**
 818 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 819 * @hw: pointer to the HW structure
 820 * @max_size: value to assign to max frame size
 821 * Hyper-V variant.
 822 **/
 823static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 824{
 825	u32 reg;
 826
 827	/* On Hyper-V there is no PF mailbox, so program the limit directly
 828	 * into the ring 0 receive descriptor control register.
 829	 */
 830	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
 831	/* add 4 bytes for the CRC and enable the max packet length limit */
 832	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
 833	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
 834
 835	return 0;
 836}
 837
 838/**
 839 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 840 *  @hw: pointer to the HW structure
 841 *  @api: integer containing requested API version
 842 **/
 843static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 844{
 845	int err;
 846	u32 msg[3];
 847
 848	/* Negotiate the mailbox API version */
 849	msg[0] = IXGBE_VF_API_NEGOTIATE;
 850	msg[1] = api;
 851	msg[2] = 0;
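	/* The reply is checked with the CTS bit masked off; the PF signals
	 * acceptance by echoing IXGBE_VF_API_NEGOTIATE with the success
	 * status bit set, anything else is treated as an invalid argument.
	 */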
 852
 853	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 854	if (!err) {
 855		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 856
 857		/* Store value and return 0 on success */
 858		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS)) {
 859			hw->api_version = api;
 860			return 0;
 861		}
 862
 863		err = IXGBE_ERR_INVALID_ARGUMENT;
 864	}
 865
 866	return err;
 867}
 868
 869/**
 870 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 871 *  @hw: pointer to the HW structure
 872 *  @api: integer containing requested API version
 873 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 874 **/
 875static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 876{
 877	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
 878	if (api != ixgbe_mbox_api_10)
 879		return IXGBE_ERR_INVALID_ARGUMENT;
 880
 881	return 0;
 882}
 883
 884int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 885		       unsigned int *default_tc)
 886{
 887	int err;
 888	u32 msg[5];
 889
 890	/* do nothing if API doesn't support ixgbevf_get_queues */
 891	switch (hw->api_version) {
 892	case ixgbe_mbox_api_11:
 893	case ixgbe_mbox_api_12:
 894	case ixgbe_mbox_api_13:
 895	case ixgbe_mbox_api_14:
 896		break;
 897	default:
 898		return 0;
 899	}
 900
 901	/* Fetch queue configuration from the PF */
 902	msg[0] = IXGBE_VF_GET_QUEUE;
 903	msg[1] = msg[2] = msg[3] = msg[4] = 0;
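	/* Reply layout: msg[IXGBE_VF_TX_QUEUES] and msg[IXGBE_VF_RX_QUEUES]
	 * carry the Tx/Rx queue limits, msg[IXGBE_VF_TRANS_VLAN] the number
	 * of traffic classes and msg[IXGBE_VF_DEF_QUEUE] the default traffic
	 * class; each value is range checked below before it is used.
	 */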
 904
 905	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 906	if (!err) {
 907		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 908
 909		/* if we didn't get an ACK there must have been
 910		 * some sort of mailbox error so we should treat it
 911		 * as such
 912		 */
 913		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
 914			return IXGBE_ERR_MBX;
 915
 916		/* record and validate values from message */
 917		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
 918		if (hw->mac.max_tx_queues == 0 ||
 919		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
 920			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
 921
 922		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
 923		if (hw->mac.max_rx_queues == 0 ||
 924		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
 925			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
 926
 927		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
 928		/* in case of unknown state assume we cannot tag frames */
 929		if (*num_tcs > hw->mac.max_rx_queues)
 930			*num_tcs = 1;
 931
 932		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
 933		/* default to queue 0 on out-of-bounds queue number */
 934		if (*default_tc >= hw->mac.max_tx_queues)
 935			*default_tc = 0;
 936	}
 937
 938	return err;
 939}
 940
 941static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 942	.init_hw		= ixgbevf_init_hw_vf,
 943	.reset_hw		= ixgbevf_reset_hw_vf,
 944	.start_hw		= ixgbevf_start_hw_vf,
 945	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
 946	.stop_adapter		= ixgbevf_stop_hw_vf,
 947	.setup_link		= ixgbevf_setup_mac_link_vf,
 948	.check_link		= ixgbevf_check_mac_link_vf,
 949	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
 950	.set_rar		= ixgbevf_set_rar_vf,
 951	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
 952	.update_xcast_mode	= ixgbevf_update_xcast_mode,
 953	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
 954	.set_vfta		= ixgbevf_set_vfta_vf,
 955	.set_rlpml		= ixgbevf_set_rlpml_vf,
 956};
 957
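/* Hyper-V variant of the MAC ops table: reset, link checking, address
 * filtering, API negotiation and RLPML setup are all handled without PF
 * mailbox traffic, since Hyper-V does not provide a mailbox.
 */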
 958static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
 959	.init_hw		= ixgbevf_init_hw_vf,
 960	.reset_hw		= ixgbevf_hv_reset_hw_vf,
 961	.start_hw		= ixgbevf_start_hw_vf,
 962	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
 963	.stop_adapter		= ixgbevf_stop_hw_vf,
 964	.setup_link		= ixgbevf_setup_mac_link_vf,
 965	.check_link		= ixgbevf_hv_check_mac_link_vf,
 966	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
 967	.set_rar		= ixgbevf_hv_set_rar_vf,
 968	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
 969	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
 970	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
 971	.set_vfta		= ixgbevf_hv_set_vfta_vf,
 972	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
 973};
 974
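/* Per-device info structures: each supported VF MAC type gets a standard
 * entry and a Hyper-V entry that pairs the MAC type with the matching ops
 * table, so the probe code can pick the right set for the device it finds.
 */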
 975const struct ixgbevf_info ixgbevf_82599_vf_info = {
 976	.mac = ixgbe_mac_82599_vf,
 977	.mac_ops = &ixgbevf_mac_ops,
 978};
 979
 980const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
 981	.mac = ixgbe_mac_82599_vf,
 982	.mac_ops = &ixgbevf_hv_mac_ops,
 983};
 984
 985const struct ixgbevf_info ixgbevf_X540_vf_info = {
 986	.mac = ixgbe_mac_X540_vf,
 987	.mac_ops = &ixgbevf_mac_ops,
 988};
 989
 990const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
 991	.mac = ixgbe_mac_X540_vf,
 992	.mac_ops = &ixgbevf_hv_mac_ops,
 993};
 994
 995const struct ixgbevf_info ixgbevf_X550_vf_info = {
 996	.mac = ixgbe_mac_X550_vf,
 997	.mac_ops = &ixgbevf_mac_ops,
 998};
 999
1000const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
1001	.mac = ixgbe_mac_X550_vf,
1002	.mac_ops = &ixgbevf_hv_mac_ops,
1003};
1004
1005const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1006	.mac = ixgbe_mac_X550EM_x_vf,
1007	.mac_ops = &ixgbevf_mac_ops,
1008};
1009
1010const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1011	.mac = ixgbe_mac_X550EM_x_vf,
1012	.mac_ops = &ixgbevf_hv_mac_ops,
1013};
1014
1015const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
1016	.mac = ixgbe_mac_x550em_a_vf,
1017	.mac_ops = &ixgbevf_mac_ops,
1018};