   1// SPDX-License-Identifier: GPL-2.0
   2/* Intel(R) Ethernet Switch Host Interface Driver
   3 * Copyright(c) 2013 - 2018 Intel Corporation.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * The full GNU General Public License is included in this distribution in
  15 * the file called "COPYING".
  16 *
  17 * Contact Information:
  18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20 */
  21
  22#include "fm10k_pf.h"
  23#include "fm10k_vf.h"
  24
  25/**
  26 *  fm10k_reset_hw_pf - PF hardware reset
  27 *  @hw: pointer to hardware structure
  28 *
  29 *  This function should return the hardware to a state similar to the
  30 *  one it is in after being powered on.
  31 **/
  32static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
  33{
  34	s32 err;
  35	u32 reg;
  36	u16 i;
  37
  38	/* Disable interrupts */
  39	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
  40
  41	/* Lock ITR2 reg 0 into itself and disable interrupt moderation */
  42	fm10k_write_reg(hw, FM10K_ITR2(0), 0);
  43	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
  44
  45	/* We assume here Tx and Rx queue 0 are owned by the PF */
  46
  47	/* Shut off VF access to their queues forcing them to queue 0 */
  48	for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
  49		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
  50		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
  51	}
  52
  53	/* shut down all rings */
  54	err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
  55	if (err == FM10K_ERR_REQUESTS_PENDING) {
  56		hw->mac.reset_while_pending++;
  57		goto force_reset;
  58	} else if (err) {
  59		return err;
  60	}
  61
  62	/* Verify that DMA is no longer active */
  63	reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
  64	if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
  65		return FM10K_ERR_DMA_PENDING;
  66
  67force_reset:
   68	/* Initiate data path reset */
  69	reg = FM10K_DMA_CTRL_DATAPATH_RESET;
  70	fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
  71
  72	/* Flush write and allow 100us for reset to complete */
  73	fm10k_write_flush(hw);
  74	udelay(FM10K_RESET_TIMEOUT);
  75
  76	/* Verify we made it out of reset */
  77	reg = fm10k_read_reg(hw, FM10K_IP);
  78	if (!(reg & FM10K_IP_NOTINRESET))
  79		return FM10K_ERR_RESET_FAILED;
  80
  81	return 0;
  82}
  83
  84/**
  85 *  fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
  86 *  @hw: pointer to hardware structure
  87 *
  88 *  Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
  89 **/
  90static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
  91{
  92	u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL);
  93
  94	return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
  95}
  96
  97/**
  98 *  fm10k_init_hw_pf - PF hardware initialization
  99 *  @hw: pointer to hardware structure
 100 *
 101 **/
 102static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
 103{
 104	u32 dma_ctrl, txqctl;
 105	u16 i;
 106
 107	/* Establish default VSI as valid */
 108	fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
 109	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
 110			FM10K_DGLORTMAP_ANY);
 111
 112	/* Invalidate all other GLORT entries */
 113	for (i = 1; i < FM10K_DGLORT_COUNT; i++)
 114		fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
 115
 116	/* reset ITR2(0) to point to itself */
 117	fm10k_write_reg(hw, FM10K_ITR2(0), 0);
 118
  119	/* reset VF ITR2(0) to point to 0 to avoid PF registers */
 120	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
 121
 122	/* loop through all PF ITR2 registers pointing them to the previous */
 123	for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
 124		fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
 125
 126	/* Enable interrupt moderator if not already enabled */
 127	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
 128
 129	/* compute the default txqctl configuration */
 130	txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
 131		 (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
 132
 133	for (i = 0; i < FM10K_MAX_QUEUES; i++) {
 134		/* configure rings for 256 Queue / 32 Descriptor cache mode */
 135		fm10k_write_reg(hw, FM10K_TQDLOC(i),
 136				(i * FM10K_TQDLOC_BASE_32_DESC) |
 137				FM10K_TQDLOC_SIZE_32_DESC);
 138		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
 139
 140		/* configure rings to provide TPH processing hints */
 141		fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i),
 142				FM10K_TPH_TXCTRL_DESC_TPHEN |
 143				FM10K_TPH_TXCTRL_DESC_RROEN |
 144				FM10K_TPH_TXCTRL_DESC_WROEN |
 145				FM10K_TPH_TXCTRL_DATA_RROEN);
 146		fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i),
 147				FM10K_TPH_RXCTRL_DESC_TPHEN |
 148				FM10K_TPH_RXCTRL_DESC_RROEN |
 149				FM10K_TPH_RXCTRL_DATA_WROEN |
 150				FM10K_TPH_RXCTRL_HDR_WROEN);
 151	}
 152
 153	/* set max hold interval to align with 1.024 usec in all modes and
 154	 * store ITR scale
 155	 */
 156	switch (hw->bus.speed) {
 157	case fm10k_bus_speed_2500:
 158		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
 159		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
 160		break;
 161	case fm10k_bus_speed_5000:
 162		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
 163		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
 164		break;
 165	case fm10k_bus_speed_8000:
 166		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
 167		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
 168		break;
 169	default:
 170		dma_ctrl = 0;
 171		/* just in case, assume Gen3 ITR scale */
 172		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
 173		break;
 174	}
 175
 176	/* Configure TSO flags */
 177	fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
 178	fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
 179
 180	/* Enable DMA engine
 181	 * Set Rx Descriptor size to 32
 182	 * Set Minimum MSS to 64
 183	 * Set Maximum number of Rx queues to 256 / 32 Descriptor
 184	 */
 185	dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
 186		    FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
 187		    FM10K_DMA_CTRL_32_DESC;
 188
 189	fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl);
 190
 191	/* record maximum queue count, we limit ourselves to 128 */
 192	hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
 193
 194	/* We support either 64 VFs or 7 VFs depending on if we have ARI */
 195	hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
 196
 197	return 0;
 198}
 199
 200/**
 201 *  fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
 202 *  @hw: pointer to hardware structure
 203 *  @vid: VLAN ID to add to table
 204 *  @vsi: Index indicating VF ID or PF ID in table
 205 *  @set: Indicates if this is a set or clear operation
 206 *
 207 *  This function adds or removes the corresponding VLAN ID from the VLAN
 208 *  filter table for the corresponding function.  In addition to the
  209 *  standard set/clear that supports one bit, a multi-bit write is
 210 *  supported to set 64 bits at a time.
 211 **/
 212static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
 213{
 214	u32 vlan_table, reg, mask, bit, len;
 215
 216	/* verify the VSI index is valid */
 217	if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
 218		return FM10K_ERR_PARAM;
 219
 220	/* VLAN multi-bit write:
 221	 * The multi-bit write has several parts to it.
 222	 *               24              16               8               0
 223	 *  7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
 224	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 225	 * | RSVD0 |         Length        |C|RSVD0|        VLAN ID        |
 226	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 227	 *
  228	 * VLAN ID: VLAN starting value
 229	 * RSVD0: Reserved section, must be 0
 230	 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
 231	 * Length: Number of times to repeat the bit being set
 232	 */
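	/* Worked example (illustrative, not from the datasheet): a request of
	 * vid = (63 << 16) | 100 with set == true encodes a length of 63 and
	 * a starting VLAN of 100, so the loop below sets 64 consecutive bits,
	 * i.e. VLAN IDs 100 through 163, for the given VSI.
	 */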
 233	len = vid >> 16;
 234	vid = (vid << 17) >> 17;
 235
 236	/* verify the reserved 0 fields are 0 */
 237	if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
 238		return FM10K_ERR_PARAM;
 239
 240	/* Loop through the table updating all required VLANs */
 241	for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
 242	     len < FM10K_VLAN_TABLE_VID_MAX;
 243	     len -= 32 - bit, reg++, bit = 0) {
 244		/* record the initial state of the register */
 245		vlan_table = fm10k_read_reg(hw, reg);
 246
 247		/* truncate mask if we are at the start or end of the run */
 248		mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
 249
 250		/* make necessary modifications to the register */
 251		mask &= set ? ~vlan_table : vlan_table;
 252		if (mask)
 253			fm10k_write_reg(hw, reg, vlan_table ^ mask);
 254	}
 255
 256	return 0;
 257}
 258
 259/**
 260 *  fm10k_read_mac_addr_pf - Read device MAC address
 261 *  @hw: pointer to the HW structure
 262 *
 263 *  Reads the device MAC address from the SM_AREA and stores the value.
 264 **/
 265static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
 266{
 267	u8 perm_addr[ETH_ALEN];
 268	u32 serial_num;
 269
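	/* The address is split across two 32-bit SM_AREA words. As an
	 * illustrative example, a permanent MAC of a0:36:9f:12:34:56 would
	 * be stored as SM_AREA(1) == 0xa0369fff and SM_AREA(0) == 0xff123456,
	 * with the 0xff octets checked below as validity markers.
	 */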
 270	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1));
 271
 272	/* last byte should be all 1's */
 273	if ((~serial_num) << 24)
 274		return  FM10K_ERR_INVALID_MAC_ADDR;
 275
 276	perm_addr[0] = (u8)(serial_num >> 24);
 277	perm_addr[1] = (u8)(serial_num >> 16);
 278	perm_addr[2] = (u8)(serial_num >> 8);
 279
 280	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0));
 281
 282	/* first byte should be all 1's */
 283	if ((~serial_num) >> 24)
 284		return  FM10K_ERR_INVALID_MAC_ADDR;
 285
 286	perm_addr[3] = (u8)(serial_num >> 16);
 287	perm_addr[4] = (u8)(serial_num >> 8);
 288	perm_addr[5] = (u8)(serial_num);
 289
 290	ether_addr_copy(hw->mac.perm_addr, perm_addr);
 291	ether_addr_copy(hw->mac.addr, perm_addr);
 292
 293	return 0;
 294}
 295
 296/**
 297 *  fm10k_glort_valid_pf - Validate that the provided glort is valid
 298 *  @hw: pointer to the HW structure
 299 *  @glort: base glort to be validated
 300 *
  301 *  This function will return false if the provided glort is invalid
 302 **/
 303bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
 304{
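	/* Illustrative example: with dglort_map == 0xffc00100 the upper
	 * 16 bits (0xffc0) act as the mask and the lower 16 bits (0x0100)
	 * as the base, so glorts 0x0100 through 0x013f are treated as valid.
	 */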
 305	glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
 306
 307	return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
 308}
 309
 310/**
 311 *  fm10k_update_xc_addr_pf - Update device addresses
 312 *  @hw: pointer to the HW structure
 313 *  @glort: base resource tag for this request
 314 *  @mac: MAC address to add/remove from table
 315 *  @vid: VLAN ID to add/remove from table
 316 *  @add: Indicates if this is an add or remove operation
 317 *  @flags: flags field to indicate add and secure
 318 *
 319 *  This function generates a message to the Switch API requesting
 320 *  that the given logical port add/remove the given L2 MAC/VLAN address.
 321 **/
 322static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
 323				   const u8 *mac, u16 vid, bool add, u8 flags)
 324{
 325	struct fm10k_mbx_info *mbx = &hw->mbx;
 326	struct fm10k_mac_update mac_update;
 327	u32 msg[5];
 328
 329	/* clear set bit from VLAN ID */
 330	vid &= ~FM10K_VLAN_CLEAR;
 331
 332	/* if glort or VLAN are not valid return error */
 333	if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
 334		return FM10K_ERR_PARAM;
 335
 336	/* record fields */
 337	mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
 338						 ((u32)mac[3] << 16) |
 339						 ((u32)mac[4] << 8) |
 340						 ((u32)mac[5]));
 341	mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) |
 342					   ((u16)mac[1]));
 343	mac_update.vlan = cpu_to_le16(vid);
 344	mac_update.glort = cpu_to_le16(glort);
 345	mac_update.action = add ? 0 : 1;
 346	mac_update.flags = flags;
 347
 348	/* populate mac_update fields */
 349	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
 350	fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
 351				     &mac_update, sizeof(mac_update));
 352
 353	/* load onto outgoing mailbox */
 354	return mbx->ops.enqueue_tx(hw, mbx, msg);
 355}
 356
 357/**
 358 *  fm10k_update_uc_addr_pf - Update device unicast addresses
 359 *  @hw: pointer to the HW structure
 360 *  @glort: base resource tag for this request
 361 *  @mac: MAC address to add/remove from table
 362 *  @vid: VLAN ID to add/remove from table
 363 *  @add: Indicates if this is an add or remove operation
 364 *  @flags: flags field to indicate add and secure
 365 *
 366 *  This function is used to add or remove unicast addresses for
 367 *  the PF.
 368 **/
 369static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
 370				   const u8 *mac, u16 vid, bool add, u8 flags)
 371{
 372	/* verify MAC address is valid */
 373	if (!is_valid_ether_addr(mac))
 374		return FM10K_ERR_PARAM;
 375
 376	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
 377}
 378
 379/**
 380 *  fm10k_update_mc_addr_pf - Update device multicast addresses
 381 *  @hw: pointer to the HW structure
 382 *  @glort: base resource tag for this request
 383 *  @mac: MAC address to add/remove from table
 384 *  @vid: VLAN ID to add/remove from table
 385 *  @add: Indicates if this is an add or remove operation
 386 *
 387 *  This function is used to add or remove multicast MAC addresses for
 388 *  the PF.
 389 **/
 390static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
 391				   const u8 *mac, u16 vid, bool add)
 392{
 393	/* verify multicast address is valid */
 394	if (!is_multicast_ether_addr(mac))
 395		return FM10K_ERR_PARAM;
 396
 397	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
 398}
 399
 400/**
 401 *  fm10k_update_xcast_mode_pf - Request update of multicast mode
 402 *  @hw: pointer to hardware structure
 403 *  @glort: base resource tag for this request
 404 *  @mode: integer value indicating mode being requested
 405 *
 406 *  This function will attempt to request a higher mode for the port
 407 *  so that it can enable either multicast, multicast promiscuous, or
 408 *  promiscuous mode of operation.
 409 **/
 410static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
 411{
 412	struct fm10k_mbx_info *mbx = &hw->mbx;
 413	u32 msg[3], xcast_mode;
 414
 415	if (mode > FM10K_XCAST_MODE_NONE)
 416		return FM10K_ERR_PARAM;
 417
 418	/* if glort is not valid return error */
 419	if (!fm10k_glort_valid_pf(hw, glort))
 420		return FM10K_ERR_PARAM;
 421
 422	/* write xcast mode as a single u32 value,
 423	 * lower 16 bits: glort
 424	 * upper 16 bits: mode
 425	 */
 426	xcast_mode = ((u32)mode << 16) | glort;
 427
 428	/* generate message requesting to change xcast mode */
 429	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
 430	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
 431
 432	/* load onto outgoing mailbox */
 433	return mbx->ops.enqueue_tx(hw, mbx, msg);
 434}
 435
 436/**
 437 *  fm10k_update_int_moderator_pf - Update interrupt moderator linked list
 438 *  @hw: pointer to hardware structure
 439 *
 440 *  This function walks through the MSI-X vector table to determine the
 441 *  number of active interrupts and based on that information updates the
 442 *  interrupt moderator linked list.
 443 **/
 444static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
 445{
 446	u32 i;
 447
 448	/* Disable interrupt moderator */
 449	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
 450
  451	/* loop through PF from last to first looking for enabled vectors */
 452	for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
 453		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
 454			break;
 455	}
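	/* At this point 'i' is the highest-numbered PF vector whose MSI-X
	 * mask register reads as unmasked (or 0 if none do), and it becomes
	 * the head of the moderator linked list written below.
	 */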
 456
 457	/* always reset VFITR2[0] to point to last enabled PF vector */
 458	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
 459
 460	/* reset ITR2[0] to point to last enabled PF vector */
 461	if (!hw->iov.num_vfs)
 462		fm10k_write_reg(hw, FM10K_ITR2(0), i);
 463
 464	/* Enable interrupt moderator */
 465	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
 466}
 467
 468/**
 469 *  fm10k_update_lport_state_pf - Notify the switch of a change in port state
 470 *  @hw: pointer to the HW structure
 471 *  @glort: base resource tag for this request
 472 *  @count: number of logical ports being updated
 473 *  @enable: boolean value indicating enable or disable
 474 *
 475 *  This function is used to add/remove a logical port from the switch.
 476 **/
 477static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
 478				       u16 count, bool enable)
 479{
 480	struct fm10k_mbx_info *mbx = &hw->mbx;
 481	u32 msg[3], lport_msg;
 482
 483	/* do nothing if we are being asked to create or destroy 0 ports */
 484	if (!count)
 485		return 0;
 486
 487	/* if glort is not valid return error */
 488	if (!fm10k_glort_valid_pf(hw, glort))
 489		return FM10K_ERR_PARAM;
 490
 491	/* reset multicast mode if deleting lport */
 492	if (!enable)
 493		fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
 494
 495	/* construct the lport message from the 2 pieces of data we have */
 496	lport_msg = ((u32)count << 16) | glort;
 497
 498	/* generate lport create/delete message */
 499	fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
 500					 FM10K_PF_MSG_ID_LPORT_DELETE);
 501	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
 502
 503	/* load onto outgoing mailbox */
 504	return mbx->ops.enqueue_tx(hw, mbx, msg);
 505}
 506
 507/**
 508 *  fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
 509 *  @hw: pointer to hardware structure
 510 *  @dglort: pointer to dglort configuration structure
 511 *
 512 *  Reads the configuration structure contained in dglort_cfg and uses
 513 *  that information to then populate a DGLORTMAP/DEC entry and the queues
 514 *  to which it has been assigned.
 515 **/
 516static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
 517					 struct fm10k_dglort_cfg *dglort)
 518{
 519	u16 glort, queue_count, vsi_count, pc_count;
 520	u16 vsi, queue, pc, q_idx;
 521	u32 txqctl, dglortdec, dglortmap;
 522
 523	/* verify the dglort pointer */
 524	if (!dglort)
 525		return FM10K_ERR_PARAM;
 526
 527	/* verify the dglort values */
 528	if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
 529	    (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
 530	    (dglort->queue_l > 8) || (dglort->queue_b >= 256))
 531		return FM10K_ERR_PARAM;
 532
 533	/* determine count of VSIs and queues */
 534	queue_count = BIT(dglort->rss_l + dglort->pc_l);
 535	vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
 536	glort = dglort->glort;
 537	q_idx = dglort->queue_b;
 538
 539	/* configure SGLORT for queues */
 540	for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
 541		for (queue = 0; queue < queue_count; queue++, q_idx++) {
 542			if (q_idx >= FM10K_MAX_QUEUES)
 543				break;
 544
 545			fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
 546			fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
 547		}
 548	}
 549
 550	/* determine count of PCs and queues */
 551	queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
 552	pc_count = BIT(dglort->pc_l);
 553
 554	/* configure PC for Tx queues */
 555	for (pc = 0; pc < pc_count; pc++) {
 556		q_idx = pc + dglort->queue_b;
 557		for (queue = 0; queue < queue_count; queue++) {
 558			if (q_idx >= FM10K_MAX_QUEUES)
 559				break;
 560
 561			txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
 562			txqctl &= ~FM10K_TXQCTL_PC_MASK;
 563			txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
 564			fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);
 565
 566			q_idx += pc_count;
 567		}
 568	}
 569
 570	/* configure DGLORTDEC */
 571	dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
 572		    ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
 573		    ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
 574		    ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
 575		    ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
 576		    ((u32)(dglort->queue_l));
 577	if (dglort->inner_rss)
 578		dglortdec |=  FM10K_DGLORTDEC_INNERRSS_ENABLE;
 579
 580	/* configure DGLORTMAP */
 581	dglortmap = (dglort->idx == fm10k_dglort_default) ?
 582			FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
 583	dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
 584	dglortmap |= dglort->glort;
 585
 586	/* write values to hardware */
 587	fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
 588	fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
 589
 590	return 0;
 591}
 592
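/* Derive the queue count per pool from the number of pools: more than 32
 * pools yields 2 queues each, more than 16 yields 4, more than 8 yields 8,
 * and otherwise each pool gets FM10K_MAX_QUEUES_POOL queues.
 */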
 593u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
 594{
 595	u16 num_pools = hw->iov.num_pools;
 596
 597	return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
 598	       8 : FM10K_MAX_QUEUES_POOL;
 599}
 600
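/* VF queue pools are carved from the top of the queue space downward, so a
 * given VF starts at FM10K_MAX_QUEUES minus the per-pool queue count times
 * the number of VFs at or above its index.
 */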
 601u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
 602{
 603	u16 num_vfs = hw->iov.num_vfs;
 604	u16 vf_q_idx = FM10K_MAX_QUEUES;
 605
 606	vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
 607
 608	return vf_q_idx;
 609}
 610
 611static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
 612{
 613	u16 num_pools = hw->iov.num_pools;
 614
 615	return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
 616	       FM10K_MAX_VECTORS_POOL;
 617}
 618
 619static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
 620{
 621	u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
 622
 623	vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
 624
 625	return vf_v_idx;
 626}
 627
 628/**
 629 *  fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
 630 *  @hw: pointer to the HW structure
 631 *  @num_vfs: number of VFs to be allocated
 632 *  @num_pools: number of virtualization pools to be allocated
 633 *
 634 *  Allocates queues and traffic classes to virtualization entities to prepare
 635 *  the PF for SR-IOV and VMDq
 636 **/
 637static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
 638					 u16 num_pools)
 639{
 640	u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
 641	u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
 642	int i, j;
 643
 644	/* hardware only supports up to 64 pools */
 645	if (num_pools > 64)
 646		return FM10K_ERR_PARAM;
 647
 648	/* the number of VFs cannot exceed the number of pools */
 649	if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
 650		return FM10K_ERR_PARAM;
 651
 652	/* record number of virtualization entities */
 653	hw->iov.num_vfs = num_vfs;
 654	hw->iov.num_pools = num_pools;
 655
 656	/* determine qmap offsets and counts */
 657	qmap_stride = (num_vfs > 8) ? 32 : 256;
 658	qpp = fm10k_queues_per_pool(hw);
 659	vpp = fm10k_vectors_per_pool(hw);
 660
 661	/* calculate starting index for queues */
 662	vf_q_idx = fm10k_vf_queue_index(hw, 0);
 663	qmap_idx = 0;
 664
 665	/* establish TCs with -1 credits and no quanta to prevent transmit */
 666	for (i = 0; i < num_vfs; i++) {
 667		fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0);
 668		fm10k_write_reg(hw, FM10K_TC_RATE(i), 0);
 669		fm10k_write_reg(hw, FM10K_TC_CREDIT(i),
 670				FM10K_TC_CREDIT_CREDIT_MASK);
 671	}
 672
 673	/* zero out all mbmem registers */
 674	for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
 675		fm10k_write_reg(hw, FM10K_MBMEM(i), 0);
 676
 677	/* clear event notification of VF FLR */
 678	fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0);
 679	fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0);
 680
 681	/* loop through unallocated rings assigning them back to PF */
 682	for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
 683		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
 684		fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
 685				FM10K_TXQCTL_UNLIMITED_BW | vid);
 686		fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
 687	}
 688
 689	/* PF should have already updated VFITR2[0] */
 690
 691	/* update all ITR registers to flow to VFITR2[0] */
 692	for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
 693		if (!(i & (vpp - 1)))
 694			fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp);
 695		else
 696			fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
 697	}
 698
 699	/* update PF ITR2[0] to reference the last vector */
 700	fm10k_write_reg(hw, FM10K_ITR2(0),
 701			fm10k_vf_vector_index(hw, num_vfs - 1));
 702
 703	/* loop through rings populating rings and TCs */
 704	for (i = 0; i < num_vfs; i++) {
 705		/* record index for VF queue 0 for use in end of loop */
 706		vf_q_idx0 = vf_q_idx;
 707
 708		for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
 709			/* assign VF and locked TC to queues */
 710			fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
 711			fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx),
 712					(i << FM10K_TXQCTL_TC_SHIFT) | i |
 713					FM10K_TXQCTL_VF | vid);
 714			fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx),
 715					FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
 716					FM10K_RXDCTL_DROP_ON_EMPTY);
 717			fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
 718					(i << FM10K_RXQCTL_VF_SHIFT) |
 719					FM10K_RXQCTL_VF);
 720
 721			/* map queue pair to VF */
 722			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
 723			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
 724		}
 725
 726		/* repeat the first ring for all of the remaining VF rings */
 727		for (; j < qmap_stride; j++, qmap_idx++) {
 728			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
 729			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
 730		}
 731	}
 732
 733	/* loop through remaining indexes assigning all to queue 0 */
 734	while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
 735		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
 736		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0);
 737		qmap_idx++;
 738	}
 739
 740	return 0;
 741}
 742
 743/**
 744 *  fm10k_iov_configure_tc_pf - Configure the shaping group for VF
 745 *  @hw: pointer to the HW structure
 746 *  @vf_idx: index of VF receiving GLORT
 747 *  @rate: Rate indicated in Mb/s
 748 *
  749 *  Configures the TC for a given VF to allow only up to a given number
 750 *  of Mb/s of outgoing Tx throughput.
 751 **/
 752static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
 753{
 754	/* configure defaults */
 755	u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
 756	u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
 757
 758	/* verify vf is in range */
 759	if (vf_idx >= hw->iov.num_vfs)
 760		return FM10K_ERR_PARAM;
 761
 762	/* set interval to align with 4.096 usec in all modes */
 763	switch (hw->bus.speed) {
 764	case fm10k_bus_speed_2500:
 765		interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
 766		break;
 767	case fm10k_bus_speed_5000:
 768		interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
 769		break;
 770	default:
 771		break;
 772	}
 773
 774	if (rate) {
 775		if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
 776			return FM10K_ERR_PARAM;
 777
 778		/* The quanta is measured in Bytes per 4.096 or 8.192 usec
 779		 * The rate is provided in Mbits per second
  780	 * To translate from rate to quanta we need to multiply the
 781		 * rate by 8.192 usec and divide by 8 bits/byte.  To avoid
 782		 * dealing with floating point we can round the values up
 783		 * to the nearest whole number ratio which gives us 128 / 125.
 784		 */
 785		tc_rate = (rate * 128) / 125;
 786
 787		/* try to keep the rate limiting accurate by increasing
 788		 * the number of credits and interval for rates less than 4Gb/s
 789		 */
 790		if (rate < 4000)
 791			interval <<= 1;
 792		else
 793			tc_rate >>= 1;
 794	}
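	/* Worked example (illustrative): a 10000 Mb/s limit gives
	 * tc_rate = (10000 * 128) / 125 = 10240 bytes per 8.192 usec; since
	 * the rate is at least 4000 Mb/s the value is halved to 5120 and
	 * paired with the 4.096 usec interval, expressing the same limit.
	 */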
 795
 796	/* update rate limiter with new values */
 797	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
 798	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
 799	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
 800
 801	return 0;
 802}
 803
 804/**
 805 *  fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
 806 *  @hw: pointer to the HW structure
 807 *  @vf_idx: index of VF receiving GLORT
 808 *
 809 *  Update the interrupt moderator linked list to include any MSI-X
 810 *  interrupts which the VF has enabled in the MSI-X vector table.
 811 **/
 812static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
 813{
 814	u16 vf_v_idx, vf_v_limit, i;
 815
 816	/* verify vf is in range */
 817	if (vf_idx >= hw->iov.num_vfs)
 818		return FM10K_ERR_PARAM;
 819
 820	/* determine vector offset and count */
 821	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
 822	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 823
 824	/* search for first vector that is not masked */
 825	for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
 826		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
 827			break;
 828	}
 829
 830	/* reset linked list so it now includes our active vectors */
 831	if (vf_idx == (hw->iov.num_vfs - 1))
 832		fm10k_write_reg(hw, FM10K_ITR2(0), i);
 833	else
 834		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i);
 835
 836	return 0;
 837}
 838
 839/**
 840 *  fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
 841 *  @hw: pointer to the HW structure
 842 *  @vf_info: pointer to VF information structure
 843 *
 844 *  Assign a MAC address and default VLAN to a VF and notify it of the update
 845 **/
 846static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
 847						struct fm10k_vf_info *vf_info)
 848{
 849	u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
 850	u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
 851	s32 err = 0;
 852	u16 vf_idx, vf_vid;
 853
 854	/* verify vf is in range */
 855	if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
 856		return FM10K_ERR_PARAM;
 857
 858	/* determine qmap offsets and counts */
 859	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
 860	queues_per_pool = fm10k_queues_per_pool(hw);
 861
 862	/* calculate starting index for queues */
 863	vf_idx = vf_info->vf_idx;
 864	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
 865	qmap_idx = qmap_stride * vf_idx;
 866
 867	/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
 868	 * used here to indicate to the VF that it will not have privilege to
 869	 * write VLAN_TABLE. All policy is enforced on the PF but this allows
 870	 * the VF to correctly report errors to userspace requests.
 871	 */
 872	if (vf_info->pf_vid)
 873		vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
 874	else
 875		vf_vid = vf_info->sw_vid;
 876
 877	/* generate MAC_ADDR request */
 878	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
 879	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
 880				    vf_info->mac, vf_vid);
 881
 882	/* Configure Queue control register with new VLAN ID. The TXQCTL
 883	 * register is RO from the VF, so the PF must do this even in the
 884	 * case of notifying the VF of a new VID via the mailbox.
 885	 */
 886	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
 887		 FM10K_TXQCTL_VID_MASK;
 888	txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
 889		  FM10K_TXQCTL_VF | vf_idx;
 890
 891	for (i = 0; i < queues_per_pool; i++)
 892		fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
 893
 894	/* try loading a message onto outgoing mailbox first */
 895	if (vf_info->mbx.ops.enqueue_tx) {
 896		err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
 897		if (err != FM10K_MBX_ERR_NO_MBX)
 898			return err;
 899		err = 0;
 900	}
 901
 902	/* If we aren't connected to a mailbox, this is most likely because
 903	 * the VF driver is not running. It should thus be safe to re-map
 904	 * queues and use the registers to pass the MAC address so that the VF
 905	 * driver gets correct information during its initialization.
 906	 */
 907
 908	/* MAP Tx queue back to 0 temporarily, and disable it */
 909	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
 910	fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
 911
  912	/* verify the ring is disabled before modifying base address registers */
 913	txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
 914	for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
 915		/* limit ourselves to a 1ms timeout */
 916		if (timeout == 10) {
 917			err = FM10K_ERR_DMA_PENDING;
 918			goto err_out;
 919		}
 920
 921		usleep_range(100, 200);
 922		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
 923	}
 924
 925	/* Update base address registers to contain MAC address */
 926	if (is_valid_ether_addr(vf_info->mac)) {
 927		tdbal = (((u32)vf_info->mac[3]) << 24) |
 928			(((u32)vf_info->mac[4]) << 16) |
 929			(((u32)vf_info->mac[5]) << 8);
 930
 931		tdbah = (((u32)0xFF)	        << 24) |
 932			(((u32)vf_info->mac[0]) << 16) |
 933			(((u32)vf_info->mac[1]) << 8) |
 934			((u32)vf_info->mac[2]);
 935	}
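	/* Illustrative encoding: a VF MAC of a0:36:9f:12:34:56 is passed to
	 * the VF as tdbal == 0x12345600 and tdbah == 0xffa0369f, with the
	 * leading 0xff octet mirroring the marker used in the SM_AREA format.
	 */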
 936
 937	/* Record the base address into queue 0 */
 938	fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal);
 939	fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah);
 940
 941	/* Provide the VF the ITR scale, using software-defined fields in TDLEN
 942	 * to pass the information during VF initialization. See definition of
 943	 * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
 944	 */
 945	fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
 946						   FM10K_TDLEN_ITR_SCALE_SHIFT);
 947
 948err_out:
 949	/* restore the queue back to VF ownership */
 950	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
 951	return err;
 952}
 953
 954/**
 955 *  fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
 956 *  @hw: pointer to the HW structure
 957 *  @vf_info: pointer to VF information structure
 958 *
 959 *  Reassign the interrupts and queues to a VF following an FLR
 960 **/
 961static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
 962					struct fm10k_vf_info *vf_info)
 963{
 964	u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
 965	u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
 966	u16 vf_v_idx, vf_v_limit, vf_vid;
 967	u8 vf_idx = vf_info->vf_idx;
 968	int i;
 969
 970	/* verify vf is in range */
 971	if (vf_idx >= hw->iov.num_vfs)
 972		return FM10K_ERR_PARAM;
 973
 974	/* clear event notification of VF FLR */
 975	fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
 976
 977	/* force timeout and then disconnect the mailbox */
 978	vf_info->mbx.timeout = 0;
 979	if (vf_info->mbx.ops.disconnect)
 980		vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
 981
 982	/* determine vector offset and count */
 983	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
 984	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 985
 986	/* determine qmap offsets and counts */
 987	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
 988	queues_per_pool = fm10k_queues_per_pool(hw);
 989	qmap_idx = qmap_stride * vf_idx;
 990
 991	/* make all the queues inaccessible to the VF */
 992	for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
 993		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
 994		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
 995	}
 996
 997	/* calculate starting index for queues */
 998	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
 999
1000	/* determine correct default VLAN ID */
1001	if (vf_info->pf_vid)
1002		vf_vid = vf_info->pf_vid;
1003	else
1004		vf_vid = vf_info->sw_vid;
1005
1006	/* configure Queue control register */
1007	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
1008		 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
1009		 FM10K_TXQCTL_VF | vf_idx;
1010	rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
1011
1012	/* stop further DMA and reset queue ownership back to VF */
1013	for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
1014		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
1015		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
1016		fm10k_write_reg(hw, FM10K_RXDCTL(i),
1017				FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
1018				FM10K_RXDCTL_DROP_ON_EMPTY);
1019		fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl);
1020	}
1021
1022	/* reset TC with -1 credits and no quanta to prevent transmit */
1023	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
1024	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0);
1025	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx),
1026			FM10K_TC_CREDIT_CREDIT_MASK);
1027
1028	/* update our first entry in the table based on previous VF */
1029	if (!vf_idx)
1030		hw->mac.ops.update_int_moderator(hw);
1031	else
1032		hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
1033
1034	/* reset linked list so it now includes our active vectors */
1035	if (vf_idx == (hw->iov.num_vfs - 1))
1036		fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx);
1037	else
1038		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
1039
1040	/* link remaining vectors so that next points to previous */
1041	for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
1042		fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
1043
1044	/* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
1045	for (i = FM10K_VFMBMEM_LEN; i--;)
1046		fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
1047	for (i = FM10K_VLAN_TABLE_SIZE; i--;)
1048		fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
1049	for (i = FM10K_RETA_SIZE; i--;)
1050		fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0);
1051	for (i = FM10K_RSSRK_SIZE; i--;)
1052		fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
1053	fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0);
1054
1055	/* Update base address registers to contain MAC address */
1056	if (is_valid_ether_addr(vf_info->mac)) {
1057		tdbal = (((u32)vf_info->mac[3]) << 24) |
1058			(((u32)vf_info->mac[4]) << 16) |
1059			(((u32)vf_info->mac[5]) << 8);
1060		tdbah = (((u32)0xFF)	   << 24) |
1061			(((u32)vf_info->mac[0]) << 16) |
1062			(((u32)vf_info->mac[1]) << 8) |
1063			((u32)vf_info->mac[2]);
1064	}
1065
1066	/* map queue pairs back to VF from last to first */
1067	for (i = queues_per_pool; i--;) {
1068		fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
1069		fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
1070		/* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
1071		 * explanation of how TDLEN is used.
1072		 */
1073		fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i),
1074				hw->mac.itr_scale <<
1075				FM10K_TDLEN_ITR_SCALE_SHIFT);
1076		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
1077		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
1078	}
1079
1080	/* repeat the first ring for all the remaining VF rings */
1081	for (i = queues_per_pool; i < qmap_stride; i++) {
1082		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
1083		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
1084	}
1085
1086	return 0;
1087}
1088
1089/**
1090 *  fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
1091 *  @hw: pointer to hardware structure
1092 *  @vf_info: pointer to VF information structure
1093 *  @lport_idx: Logical port offset from the hardware glort
1094 *  @flags: Set of capability flags to extend port beyond basic functionality
1095 *
1096 *  This function allows enabling a VF port by assigning it a GLORT and
1097 *  setting the flags so that it can enable an Rx mode.
1098 **/
1099static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
1100				  struct fm10k_vf_info *vf_info,
1101				  u16 lport_idx, u8 flags)
1102{
1103	u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
1104
1105	/* if glort is not valid return error */
1106	if (!fm10k_glort_valid_pf(hw, glort))
1107		return FM10K_ERR_PARAM;
1108
1109	vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
1110	vf_info->glort = glort;
1111
1112	return 0;
1113}
1114
1115/**
1116 *  fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
1117 *  @hw: pointer to hardware structure
1118 *  @vf_info: pointer to VF information structure
1119 *
1120 *  This function disables a VF port by stripping it of a GLORT and
1121 *  setting the flags so that it cannot enable any Rx mode.
1122 **/
1123static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
1124				     struct fm10k_vf_info *vf_info)
1125{
1126	u32 msg[1];
1127
1128	/* need to disable the port if it is already enabled */
1129	if (FM10K_VF_FLAG_ENABLED(vf_info)) {
1130		/* notify switch that this port has been disabled */
1131		fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
1132
1133		/* generate port state response to notify VF it is not ready */
1134		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1135		vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
1136	}
1137
1138	/* clear flags and glort if it exists */
1139	vf_info->vf_flags = 0;
1140	vf_info->glort = 0;
1141}
1142
1143/**
1144 *  fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
1145 *  @hw: pointer to hardware structure
1146 *  @q: stats for all queues of a VF
1147 *  @vf_idx: index of VF
1148 *
1149 *  This function collects queue stats for VFs.
1150 **/
1151static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
1152				      struct fm10k_hw_stats_q *q,
1153				      u16 vf_idx)
1154{
1155	u32 idx, qpp;
1156
1157	/* get stats for all of the queues */
1158	qpp = fm10k_queues_per_pool(hw);
1159	idx = fm10k_vf_queue_index(hw, vf_idx);
1160	fm10k_update_hw_stats_q(hw, q, idx, qpp);
1161}
1162
1163/**
1164 *  fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
1165 *  @hw: Pointer to hardware structure
1166 *  @results: Pointer array to message, results[0] is pointer to message
1167 *  @mbx: Pointer to mailbox information structure
1168 *
1169 *  This function is a default handler for MSI-X requests from the VF.  The
1170 *  assumption is that in this case it is acceptable to just directly
1171 *  hand off the message from the VF to the underlying shared code.
1172 **/
1173s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
1174			  struct fm10k_mbx_info *mbx)
1175{
1176	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1177	u8 vf_idx = vf_info->vf_idx;
1178
1179	return hw->iov.ops.assign_int_moderator(hw, vf_idx);
1180}
1181
1182/**
1183 * fm10k_iov_select_vid - Select correct default VLAN ID
1184 * @vf_info: pointer to VF information structure
1185 * @vid: VLAN ID to correct
1186 *
1187 * Will report an error if the VLAN ID is out of range. For VID = 0, it will
1188 * return either the pf_vid or sw_vid depending on which one is set.
1189 */
1190s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
1191{
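	/* For example (illustrative): with pf_vid == 100, a request for
	 * VID 0 resolves to 100, a request for VID 100 is returned as-is,
	 * and any other VID is rejected with FM10K_ERR_PARAM.
	 */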
1192	if (!vid)
1193		return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
1194	else if (vf_info->pf_vid && vid != vf_info->pf_vid)
1195		return FM10K_ERR_PARAM;
1196	else
1197		return vid;
1198}
1199
1200/**
1201 *  fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
1202 *  @hw: Pointer to hardware structure
1203 *  @results: Pointer array to message, results[0] is pointer to message
1204 *  @mbx: Pointer to mailbox information structure
1205 *
1206 *  This function is a default handler for MAC/VLAN requests from the VF.
1207 *  The assumption is that in this case it is acceptable to just directly
1208 *  hand off the message from the VF to the underlying shared code.
1209 **/
1210s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1211			      struct fm10k_mbx_info *mbx)
1212{
1213	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1214	u8 mac[ETH_ALEN];
1215	u32 *result;
1216	int err = 0;
1217	bool set;
1218	u16 vlan;
1219	u32 vid;
1220
1221	/* we shouldn't be updating rules on a disabled interface */
1222	if (!FM10K_VF_FLAG_ENABLED(vf_info))
1223		err = FM10K_ERR_PARAM;
1224
1225	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
1226		result = results[FM10K_MAC_VLAN_MSG_VLAN];
1227
1228		/* record VLAN id requested */
1229		err = fm10k_tlv_attr_get_u32(result, &vid);
1230		if (err)
1231			return err;
1232
1233		set = !(vid & FM10K_VLAN_CLEAR);
1234		vid &= ~FM10K_VLAN_CLEAR;
1235
1236		/* if the length field has been set, this is a multi-bit
1237		 * update request. For multi-bit requests, simply disallow
1238		 * them when the pf_vid has been set. In this case, the PF
1239		 * should have already cleared the VLAN_TABLE, and if we
1240		 * allowed them, it could allow a rogue VF to receive traffic
1241		 * on a VLAN it was not assigned. In the single-bit case, we
1242		 * need to modify requests for VLAN 0 to use the default PF or
1243		 * SW vid when assigned.
1244		 */
1245
1246		if (vid >> 16) {
1247			/* prevent multi-bit requests when PF has
1248			 * administratively set the VLAN for this VF
1249			 */
1250			if (vf_info->pf_vid)
1251				return FM10K_ERR_PARAM;
1252		} else {
1253			err = fm10k_iov_select_vid(vf_info, (u16)vid);
1254			if (err < 0)
1255				return err;
1256
1257			vid = err;
1258		}
1259
1260		/* update VSI info for VF in regards to VLAN table */
1261		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
1262	}
1263
1264	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
1265		result = results[FM10K_MAC_VLAN_MSG_MAC];
1266
1267		/* record unicast MAC address requested */
1268		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1269		if (err)
1270			return err;
1271
1272		/* block attempts to set MAC for a locked device */
1273		if (is_valid_ether_addr(vf_info->mac) &&
1274		    !ether_addr_equal(mac, vf_info->mac))
1275			return FM10K_ERR_PARAM;
1276
1277		set = !(vlan & FM10K_VLAN_CLEAR);
1278		vlan &= ~FM10K_VLAN_CLEAR;
1279
1280		err = fm10k_iov_select_vid(vf_info, vlan);
1281		if (err < 0)
1282			return err;
1283
1284		vlan = (u16)err;
1285
1286		/* notify switch of request for new unicast address */
1287		err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
1288						 mac, vlan, set, 0);
1289	}
1290
1291	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
1292		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
1293
1294		/* record multicast MAC address requested */
1295		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1296		if (err)
1297			return err;
1298
1299		/* verify that the VF is allowed to request multicast */
1300		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
1301			return FM10K_ERR_PARAM;
1302
1303		set = !(vlan & FM10K_VLAN_CLEAR);
1304		vlan &= ~FM10K_VLAN_CLEAR;
1305
1306		err = fm10k_iov_select_vid(vf_info, vlan);
1307		if (err < 0)
1308			return err;
1309
1310		vlan = (u16)err;
1311
1312		/* notify switch of request for new multicast address */
1313		err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
1314						 mac, vlan, set);
1315	}
1316
1317	return err;
1318}
1319
1320/**
1321 *  fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
1322 *  @vf_info: VF info structure containing capability flags
1323 *  @mode: Requested xcast mode
1324 *
1325 *  This function outputs the mode that most closely matches the requested
 1326 *  mode.  If no modes match it will request we disable the port
1327 **/
1328static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
1329					    u8 mode)
1330{
1331	u8 vf_flags = vf_info->vf_flags;
1332
1333	/* match up mode to capabilities as best as possible */
1334	switch (mode) {
1335	case FM10K_XCAST_MODE_PROMISC:
1336		if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
1337			return FM10K_XCAST_MODE_PROMISC;
1338		/* fall through */
1339	case FM10K_XCAST_MODE_ALLMULTI:
1340		if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
1341			return FM10K_XCAST_MODE_ALLMULTI;
1342		/* fall through */
1343	case FM10K_XCAST_MODE_MULTI:
1344		if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
1345			return FM10K_XCAST_MODE_MULTI;
1346		/* fall through */
1347	case FM10K_XCAST_MODE_NONE:
1348		if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
1349			return FM10K_XCAST_MODE_NONE;
1350		/* fall through */
1351	default:
1352		break;
1353	}
1354
1355	/* disable interface as it should not be able to request any */
1356	return FM10K_XCAST_MODE_DISABLE;
1357}
1358
1359/**
1360 *  fm10k_iov_msg_lport_state_pf - Message handler for port state requests
1361 *  @hw: Pointer to hardware structure
1362 *  @results: Pointer array to message, results[0] is pointer to message
1363 *  @mbx: Pointer to mailbox information structure
1364 *
1365 *  This function is a default handler for port state requests.  The port
1366 *  state requests for now are basic and consist of enabling or disabling
1367 *  the port.
1368 **/
1369s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
1370				 struct fm10k_mbx_info *mbx)
1371{
1372	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1373	u32 *result;
1374	s32 err = 0;
1375	u32 msg[2];
1376	u8 mode = 0;
1377
1378	/* verify VF is allowed to enable even minimal mode */
1379	if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
1380		return FM10K_ERR_PARAM;
1381
1382	if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
1383		result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
1384
1385		/* XCAST mode update requested */
1386		err = fm10k_tlv_attr_get_u8(result, &mode);
1387		if (err)
1388			return FM10K_ERR_PARAM;
1389
1390		/* prep for possible demotion depending on capabilities */
1391		mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
1392
1393		/* if mode is not currently enabled, enable it */
1394		if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
1395			fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
1396
1397		/* swap mode back to a bit flag */
1398		mode = FM10K_VF_FLAG_SET_MODE(mode);
1399	} else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
1400		/* need to disable the port if it is already enabled */
1401		if (FM10K_VF_FLAG_ENABLED(vf_info))
1402			err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1403							  1, false);
1404
1405		/* we need to clear VF_FLAG_ENABLED flags in order to ensure
1406		 * that we actually re-enable the LPORT state below. Note that
1407		 * this has no impact if the VF is already disabled, as the
1408		 * flags are already cleared.
1409		 */
1410		if (!err)
1411			vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
1412
1413		/* when enabling the port we should reset the rate limiters */
1414		hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
1415
1416		/* set mode for minimal functionality */
1417		mode = FM10K_VF_FLAG_SET_MODE_NONE;
1418
1419		/* generate port state response to notify VF it is ready */
1420		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1421		fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
1422		mbx->ops.enqueue_tx(hw, mbx, msg);
1423	}
1424
1425	/* if enable state toggled note the update */
1426	if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
1427		err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
1428						  !!mode);
1429
1430	/* if state change succeeded, then update our stored state */
1431	mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
1432	if (!err)
1433		vf_info->vf_flags = mode;
1434
1435	return err;
1436}
1437
1438/**
 1439 *  fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
1440 *  @hw: pointer to hardware structure
1441 *  @stats: pointer to the stats structure to update
1442 *
1443 *  This function collects and aggregates global and per queue hardware
1444 *  statistics.
1445 **/
1446static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
1447				     struct fm10k_hw_stats *stats)
1448{
1449	u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
1450	u32 id, id_prev;
1451
1452	/* Use Tx queue 0 as a canary to detect a reset */
1453	id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
1454
1455	/* Read Global Statistics */
1456	do {
1457		timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
1458						  &stats->timeout);
1459		ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
1460		ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
1461		um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
1462		xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
1463		vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
1464						    &stats->vlan_drop);
1465		loopback_drop =
1466			fm10k_read_hw_stats_32b(hw,
1467						FM10K_STATS_LOOPBACK_DROP,
1468						&stats->loopback_drop);
1469		nodesc_drop = fm10k_read_hw_stats_32b(hw,
1470						      FM10K_STATS_NODESC_DROP,
1471						      &stats->nodesc_drop);
1472
1473		/* if value has not changed then we have consistent data */
1474		id_prev = id;
1475		id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
1476	} while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
1477
1478	/* drop non-ID bits and set VALID ID bit */
1479	id &= FM10K_TXQCTL_ID_MASK;
1480	id |= FM10K_STAT_VALID;
1481
1482	/* Update Global Statistics */
1483	if (stats->stats_idx == id) {
1484		stats->timeout.count += timeout;
1485		stats->ur.count += ur;
1486		stats->ca.count += ca;
1487		stats->um.count += um;
1488		stats->xec.count += xec;
1489		stats->vlan_drop.count += vlan_drop;
1490		stats->loopback_drop.count += loopback_drop;
1491		stats->nodesc_drop.count += nodesc_drop;
1492	}
1493
1494	/* Update bases and record current PF id */
1495	fm10k_update_hw_base_32b(&stats->timeout, timeout);
1496	fm10k_update_hw_base_32b(&stats->ur, ur);
1497	fm10k_update_hw_base_32b(&stats->ca, ca);
1498	fm10k_update_hw_base_32b(&stats->um, um);
1499	fm10k_update_hw_base_32b(&stats->xec, xec);
1500	fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
1501	fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
1502	fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
1503	stats->stats_idx = id;
1504
1505	/* Update Queue Statistics */
1506	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
1507}
1508
1509/**
1510 *  fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
1511 *  @hw: pointer to hardware structure
1512 *  @stats: pointer to the stats structure to update
1513 *
1514 *  This function resets the base for global and per queue hardware
1515 *  statistics.
1516 **/
1517static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
1518				     struct fm10k_hw_stats *stats)
1519{
1520	/* Unbind Global Statistics */
1521	fm10k_unbind_hw_stats_32b(&stats->timeout);
1522	fm10k_unbind_hw_stats_32b(&stats->ur);
1523	fm10k_unbind_hw_stats_32b(&stats->ca);
1524	fm10k_unbind_hw_stats_32b(&stats->um);
1525	fm10k_unbind_hw_stats_32b(&stats->xec);
1526	fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
1527	fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
1528	fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
1529
1530	/* Unbind Queue Statistics */
1531	fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
1532
1533	/* Reinitialize bases for all stats */
1534	fm10k_update_hw_stats_pf(hw, stats);
1535}
1536
1537/**
1538 *  fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
1539 *  @hw: pointer to hardware structure
1540 *  @dma_mask: 64 bit DMA mask required for platform
1541 *
1542 *  This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
1543 *  to limit the access to memory beyond what is physically in the system.
1544 **/
1545static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
1546{
1547	/* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
1548	u32 phyaddr = (u32)(dma_mask >> 32);
1549
1550	fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr);
1551}
1552
1553/**
1554 *  fm10k_get_fault_pf - Record a fault in one of the interface units
1555 *  @hw: pointer to hardware structure
1556 *  @type: pointer to fault type register offset
1557 *  @fault: pointer to memory location to record the fault
1558 *
1559 *  Record the fault register contents to the fault data structure and
1560 *  clear the entry from the register.
1561 *
 1562 *  Returns ERR_PARAM if an invalid register is specified or no error is present.
1563 **/
1564static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
1565			      struct fm10k_fault *fault)
1566{
1567	u32 func;
1568
1569	/* verify the fault register is in range and is aligned */
1570	switch (type) {
1571	case FM10K_PCA_FAULT:
1572	case FM10K_THI_FAULT:
1573	case FM10K_FUM_FAULT:
1574		break;
1575	default:
1576		return FM10K_ERR_PARAM;
1577	}
1578
1579	/* only service faults that are valid */
1580	func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC);
1581	if (!(func & FM10K_FAULT_FUNC_VALID))
1582		return FM10K_ERR_PARAM;
1583
1584	/* read remaining fields */
1585	fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
1586	fault->address <<= 32;
 1587	fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
1588	fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);
1589
1590	/* clear valid bit to allow for next error */
1591	fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
1592
1593	/* Record which function triggered the error: 0 for the PF, VF index + 1 for a VF */
1594	if (func & FM10K_FAULT_FUNC_PF)
1595		fault->func = 0;
1596	else
1597		fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
1598				   FM10K_FAULT_FUNC_VF_SHIFT);
1599
1600	/* record fault type */
1601	fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
1602
1603	return 0;
1604}
1605
1606/**
1607 *  fm10k_request_lport_map_pf - Request LPORT map from the switch API
1608 *  @hw: pointer to hardware structure
1609 *
1610 **/
1611static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
1612{
1613	struct fm10k_mbx_info *mbx = &hw->mbx;
1614	u32 msg[1];
1615
1616	/* issue request asking for LPORT map */
1617	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
1618
1619	/* load onto outgoing mailbox */
1620	return mbx->ops.enqueue_tx(hw, mbx, msg);
1621}
1622
1623/**
1624 *  fm10k_get_host_state_pf - Returns the state of the switch and mailbox
1625 *  @hw: pointer to hardware structure
1626 *  @switch_ready: pointer to boolean value that will record switch state
1627 *
1628 *  This function will check the DMA_CTRL2 register and mailbox in order
1629 *  to determine if the switch is ready for the PF to begin requesting
1630 *  addresses and mapping traffic to the local interface.
1631 **/
1632static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
1633{
1634	u32 dma_ctrl2;
1635
1636	/* verify the switch is ready for interaction */
1637	dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
1638	if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
1639		return 0;
1640
1641	/* retrieve generic host state info */
1642	return fm10k_get_host_state_generic(hw, switch_ready);
1643}
1644
1645/* This structure defines the attributes to be parsed below */
1646const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
1647	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1648				 sizeof(struct fm10k_swapi_error)),
1649	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
1650	FM10K_TLV_ATTR_LAST
1651};
1652
1653/**
1654 *  fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
1655 *  @hw: Pointer to hardware structure
1656 *  @results: pointer array containing parsed data
1657 *  @mbx: Pointer to mailbox information structure
1658 *
1659 *  This handler configures the lport mapping based on the reply from the
1660 *  switch API.
1661 **/
1662s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
1663			   struct fm10k_mbx_info *mbx)
1664{
1665	u16 glort, mask;
1666	u32 dglort_map;
1667	s32 err;
1668
1669	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
1670				     &dglort_map);
1671	if (err)
1672		return err;
1673
1674	/* extract values out of the header */
1675	glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
1676	mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
1677
1678	/* verify mask is set and none of the masked bits in glort are set */
1679	if (!mask || (glort & ~mask))
1680		return FM10K_ERR_PARAM;
1681
1682	/* verify the mask is contiguous, and that it is 1's followed by 0's */
1683	if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
1684		return FM10K_ERR_PARAM;
1685
1686	/* record the glort, mask, and port count */
1687	hw->mac.dglort_map = dglort_map;
1688
1689	return 0;
1690}
1691
1692const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
1693	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
1694	FM10K_TLV_ATTR_LAST
1695};
1696
1697/**
1698 *  fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
1699 *  @hw: Pointer to hardware structure
1700 *  @results: pointer array containing parsed data
1701 *  @mbx: Pointer to mailbox information structure
1702 *
1703 *  This handler configures the default VLAN for the PF
1704 **/
1705static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
1706				    struct fm10k_mbx_info *mbx)
1707{
1708	u16 glort, pvid;
1709	u32 pvid_update;
1710	s32 err;
1711
1712	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1713				     &pvid_update);
1714	if (err)
1715		return err;
1716
1717	/* extract values from the pvid update */
1718	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1719	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1720
1721	/* if glort is not valid return error */
1722	if (!fm10k_glort_valid_pf(hw, glort))
1723		return FM10K_ERR_PARAM;
1724
1725	/* verify VLAN ID is valid */
1726	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1727		return FM10K_ERR_PARAM;
1728
1729	/* record the port VLAN ID value */
1730	hw->mac.default_vid = pvid;
1731
1732	return 0;
1733}
1734
1735/**
1736 *  fm10k_record_global_table_data - Move global table data to swapi table info
1737 *  @from: pointer to source table data structure
1738 *  @to: pointer to destination table info structure
1739 *
1740 *  This function will copy table_data to the table_info contained in
1741 *  the hw struct.
1742 **/
1743static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
1744					   struct fm10k_swapi_table_info *to)
1745{
1746	/* convert from le32 struct to CPU byte ordered values */
1747	to->used = le32_to_cpu(from->used);
1748	to->avail = le32_to_cpu(from->avail);
1749}
1750
1751const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
1752	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1753				 sizeof(struct fm10k_swapi_error)),
1754	FM10K_TLV_ATTR_LAST
1755};
1756
1757/**
1758 *  fm10k_msg_err_pf - Message handler for error reply
1759 *  @hw: Pointer to hardware structure
1760 *  @results: pointer array containing parsed data
1761 *  @mbx: Pointer to mailbox information structure
1762 *
1763 *  This handler will capture the data for any error replies to previous
1764 *  messages that the PF has sent.
1765 **/
1766s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
1767		     struct fm10k_mbx_info *mbx)
1768{
1769	struct fm10k_swapi_error err_msg;
1770	s32 err;
1771
1772	/* extract structure from message */
1773	err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
1774					   &err_msg, sizeof(err_msg));
1775	if (err)
1776		return err;
1777
1778	/* record table status */
1779	fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
1780	fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
1781	fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);
1782
1783	/* record SW API status value */
1784	hw->swapi.status = le32_to_cpu(err_msg.status);
1785
1786	return 0;
1787}
1788
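/* Message handler table for the PF mailbox: it maps the switch-manager
 * message IDs to the handlers above and is registered with the mailbox
 * in fm10k_get_invariants_pf() via fm10k_sm_mbx_init().
 */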
1789static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
1790	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1791	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1792	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1793	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1794	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1795	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1796	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1797};
1798
1799static const struct fm10k_mac_ops mac_ops_pf = {
1800	.get_bus_info		= fm10k_get_bus_info_generic,
1801	.reset_hw		= fm10k_reset_hw_pf,
1802	.init_hw		= fm10k_init_hw_pf,
1803	.start_hw		= fm10k_start_hw_generic,
1804	.stop_hw		= fm10k_stop_hw_generic,
1805	.update_vlan		= fm10k_update_vlan_pf,
1806	.read_mac_addr		= fm10k_read_mac_addr_pf,
1807	.update_uc_addr		= fm10k_update_uc_addr_pf,
1808	.update_mc_addr		= fm10k_update_mc_addr_pf,
1809	.update_xcast_mode	= fm10k_update_xcast_mode_pf,
1810	.update_int_moderator	= fm10k_update_int_moderator_pf,
1811	.update_lport_state	= fm10k_update_lport_state_pf,
1812	.update_hw_stats	= fm10k_update_hw_stats_pf,
1813	.rebind_hw_stats	= fm10k_rebind_hw_stats_pf,
1814	.configure_dglort_map	= fm10k_configure_dglort_map_pf,
1815	.set_dma_mask		= fm10k_set_dma_mask_pf,
1816	.get_fault		= fm10k_get_fault_pf,
1817	.get_host_state		= fm10k_get_host_state_pf,
1818	.request_lport_map	= fm10k_request_lport_map_pf,
1819};
1820
1821static const struct fm10k_iov_ops iov_ops_pf = {
1822	.assign_resources		= fm10k_iov_assign_resources_pf,
1823	.configure_tc			= fm10k_iov_configure_tc_pf,
1824	.assign_int_moderator		= fm10k_iov_assign_int_moderator_pf,
1825	.assign_default_mac_vlan	= fm10k_iov_assign_default_mac_vlan_pf,
1826	.reset_resources		= fm10k_iov_reset_resources_pf,
1827	.set_lport			= fm10k_iov_set_lport_pf,
1828	.reset_lport			= fm10k_iov_reset_lport_pf,
1829	.update_stats			= fm10k_iov_update_stats_pf,
1830};
1831
1832static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
1833{
1834	fm10k_get_invariants_generic(hw);
1835
1836	return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
1837}
1838
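/* Top-level PF description: bundles the PF MAC type, the invariants hook,
 * and the MAC/IOV operation tables defined above; the rest of the driver
 * consumes this structure when servicing a physical function.
 */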
1839const struct fm10k_info fm10k_pf_info = {
1840	.mac		= fm10k_mac_pf,
1841	.get_invariants	= fm10k_get_invariants_pf,
1842	.mac_ops	= &mac_ops_pf,
1843	.iov_ops	= &iov_ops_pf,
1844};
v4.10.11
 
   1/* Intel(R) Ethernet Switch Host Interface Driver
   2 * Copyright(c) 2013 - 2016 Intel Corporation.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms and conditions of the GNU General Public License,
   6 * version 2, as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * The full GNU General Public License is included in this distribution in
  14 * the file called "COPYING".
  15 *
  16 * Contact Information:
  17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  19 */
  20
  21#include "fm10k_pf.h"
  22#include "fm10k_vf.h"
  23
  24/**
  25 *  fm10k_reset_hw_pf - PF hardware reset
  26 *  @hw: pointer to hardware structure
  27 *
  28 *  This function should return the hardware to a state similar to the
  29 *  one it is in after being powered on.
  30 **/
  31static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
  32{
  33	s32 err;
  34	u32 reg;
  35	u16 i;
  36
  37	/* Disable interrupts */
  38	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
  39
  40	/* Lock ITR2 reg 0 into itself and disable interrupt moderation */
  41	fm10k_write_reg(hw, FM10K_ITR2(0), 0);
  42	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
  43
  44	/* We assume here Tx and Rx queue 0 are owned by the PF */
  45
  46	/* Shut off VF access to their queues forcing them to queue 0 */
  47	for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
  48		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
  49		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
  50	}
  51
  52	/* shut down all rings */
  53	err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
  54	if (err == FM10K_ERR_REQUESTS_PENDING) {
  55		hw->mac.reset_while_pending++;
  56		goto force_reset;
  57	} else if (err) {
  58		return err;
  59	}
  60
  61	/* Verify that DMA is no longer active */
  62	reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
  63	if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
  64		return FM10K_ERR_DMA_PENDING;
  65
  66force_reset:
  67	/* Inititate data path reset */
  68	reg = FM10K_DMA_CTRL_DATAPATH_RESET;
  69	fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
  70
  71	/* Flush write and allow 100us for reset to complete */
  72	fm10k_write_flush(hw);
  73	udelay(FM10K_RESET_TIMEOUT);
  74
  75	/* Reset mailbox global interrupts */
  76	reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT;
  77	fm10k_write_reg(hw, FM10K_GMBX, reg);
  78
  79	/* Verify we made it out of reset */
  80	reg = fm10k_read_reg(hw, FM10K_IP);
  81	if (!(reg & FM10K_IP_NOTINRESET))
  82		return FM10K_ERR_RESET_FAILED;
  83
  84	return 0;
  85}
  86
  87/**
  88 *  fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
  89 *  @hw: pointer to hardware structure
  90 *
  91 *  Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
  92 **/
  93static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
  94{
  95	u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL);
  96
  97	return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
  98}
  99
 100/**
 101 *  fm10k_init_hw_pf - PF hardware initialization
 102 *  @hw: pointer to hardware structure
 103 *
 104 **/
 105static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
 106{
 107	u32 dma_ctrl, txqctl;
 108	u16 i;
 109
 110	/* Establish default VSI as valid */
 111	fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
 112	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
 113			FM10K_DGLORTMAP_ANY);
 114
 115	/* Invalidate all other GLORT entries */
 116	for (i = 1; i < FM10K_DGLORT_COUNT; i++)
 117		fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
 118
 119	/* reset ITR2(0) to point to itself */
 120	fm10k_write_reg(hw, FM10K_ITR2(0), 0);
 121
 122	/* reset VF ITR2(0) to point to 0 avoid PF registers */
 123	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
 124
 125	/* loop through all PF ITR2 registers pointing them to the previous */
 126	for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
 127		fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
 128
 129	/* Enable interrupt moderator if not already enabled */
 130	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
 131
 132	/* compute the default txqctl configuration */
 133	txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
 134		 (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
 135
 136	for (i = 0; i < FM10K_MAX_QUEUES; i++) {
 137		/* configure rings for 256 Queue / 32 Descriptor cache mode */
 138		fm10k_write_reg(hw, FM10K_TQDLOC(i),
 139				(i * FM10K_TQDLOC_BASE_32_DESC) |
 140				FM10K_TQDLOC_SIZE_32_DESC);
 141		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
 142
 143		/* configure rings to provide TPH processing hints */
 144		fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i),
 145				FM10K_TPH_TXCTRL_DESC_TPHEN |
 146				FM10K_TPH_TXCTRL_DESC_RROEN |
 147				FM10K_TPH_TXCTRL_DESC_WROEN |
 148				FM10K_TPH_TXCTRL_DATA_RROEN);
 149		fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i),
 150				FM10K_TPH_RXCTRL_DESC_TPHEN |
 151				FM10K_TPH_RXCTRL_DESC_RROEN |
 152				FM10K_TPH_RXCTRL_DATA_WROEN |
 153				FM10K_TPH_RXCTRL_HDR_WROEN);
 154	}
 155
 156	/* set max hold interval to align with 1.024 usec in all modes and
 157	 * store ITR scale
 158	 */
 159	switch (hw->bus.speed) {
 160	case fm10k_bus_speed_2500:
 161		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
 162		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
 163		break;
 164	case fm10k_bus_speed_5000:
 165		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
 166		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
 167		break;
 168	case fm10k_bus_speed_8000:
 169		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
 170		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
 171		break;
 172	default:
 173		dma_ctrl = 0;
 174		/* just in case, assume Gen3 ITR scale */
 175		hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
 176		break;
 177	}
 178
 179	/* Configure TSO flags */
 180	fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
 181	fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
 182
 183	/* Enable DMA engine
 184	 * Set Rx Descriptor size to 32
 185	 * Set Minimum MSS to 64
 186	 * Set Maximum number of Rx queues to 256 / 32 Descriptor
 187	 */
 188	dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
 189		    FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
 190		    FM10K_DMA_CTRL_32_DESC;
 191
 192	fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl);
 193
 194	/* record maximum queue count, we limit ourselves to 128 */
 195	hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
 196
 197	/* We support either 64 VFs or 7 VFs depending on if we have ARI */
 198	hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
 199
 200	return 0;
 201}
 202
 203/**
 204 *  fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
 205 *  @hw: pointer to hardware structure
 206 *  @vid: VLAN ID to add to table
 207 *  @vsi: Index indicating VF ID or PF ID in table
 208 *  @set: Indicates if this is a set or clear operation
 209 *
 210 *  This function adds or removes the corresponding VLAN ID from the VLAN
 211 *  filter table for the corresponding function.  In addition to the
 212 *  standard set/clear that supports one bit a multi-bit write is
 213 *  supported to set 64 bits at a time.
 214 **/
 215static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
 216{
 217	u32 vlan_table, reg, mask, bit, len;
 218
 219	/* verify the VSI index is valid */
 220	if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
 221		return FM10K_ERR_PARAM;
 222
 223	/* VLAN multi-bit write:
 224	 * The multi-bit write has several parts to it.
 225	 *               24              16               8               0
 226	 *  7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
 227	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 228	 * | RSVD0 |         Length        |C|RSVD0|        VLAN ID        |
 229	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 230	 *
 231	 * VLAN ID: Vlan Starting value
 232	 * RSVD0: Reserved section, must be 0
 233	 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
 234	 * Length: Number of times to repeat the bit being set
 235	 */
 236	len = vid >> 16;
 237	vid = (vid << 17) >> 17;
 238
 239	/* verify the reserved 0 fields are 0 */
 240	if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
 241		return FM10K_ERR_PARAM;
 242
 243	/* Loop through the table updating all required VLANs */
 244	for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
 245	     len < FM10K_VLAN_TABLE_VID_MAX;
 246	     len -= 32 - bit, reg++, bit = 0) {
 247		/* record the initial state of the register */
 248		vlan_table = fm10k_read_reg(hw, reg);
 249
 250		/* truncate mask if we are at the start or end of the run */
 251		mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
 252
 253		/* make necessary modifications to the register */
 254		mask &= set ? ~vlan_table : vlan_table;
 255		if (mask)
 256			fm10k_write_reg(hw, reg, vlan_table ^ mask);
 257	}
 258
 259	return 0;
 260}
 261
 262/**
 263 *  fm10k_read_mac_addr_pf - Read device MAC address
 264 *  @hw: pointer to the HW structure
 265 *
 266 *  Reads the device MAC address from the SM_AREA and stores the value.
 267 **/
 268static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
 269{
 270	u8 perm_addr[ETH_ALEN];
 271	u32 serial_num;
 272
 273	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1));
 274
 275	/* last byte should be all 1's */
 276	if ((~serial_num) << 24)
 277		return  FM10K_ERR_INVALID_MAC_ADDR;
 278
 279	perm_addr[0] = (u8)(serial_num >> 24);
 280	perm_addr[1] = (u8)(serial_num >> 16);
 281	perm_addr[2] = (u8)(serial_num >> 8);
 282
 283	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0));
 284
 285	/* first byte should be all 1's */
 286	if ((~serial_num) >> 24)
 287		return  FM10K_ERR_INVALID_MAC_ADDR;
 288
 289	perm_addr[3] = (u8)(serial_num >> 16);
 290	perm_addr[4] = (u8)(serial_num >> 8);
 291	perm_addr[5] = (u8)(serial_num);
 292
 293	ether_addr_copy(hw->mac.perm_addr, perm_addr);
 294	ether_addr_copy(hw->mac.addr, perm_addr);
 295
 296	return 0;
 297}
 298
 299/**
 300 *  fm10k_glort_valid_pf - Validate that the provided glort is valid
 301 *  @hw: pointer to the HW structure
 302 *  @glort: base glort to be validated
 303 *
 304 *  This function will return an error if the provided glort is invalid
 305 **/
 306bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
 307{
 308	glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
 309
 310	return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
 311}
 312
 313/**
 314 *  fm10k_update_xc_addr_pf - Update device addresses
 315 *  @hw: pointer to the HW structure
 316 *  @glort: base resource tag for this request
 317 *  @mac: MAC address to add/remove from table
 318 *  @vid: VLAN ID to add/remove from table
 319 *  @add: Indicates if this is an add or remove operation
 320 *  @flags: flags field to indicate add and secure
 321 *
 322 *  This function generates a message to the Switch API requesting
 323 *  that the given logical port add/remove the given L2 MAC/VLAN address.
 324 **/
 325static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
 326				   const u8 *mac, u16 vid, bool add, u8 flags)
 327{
 328	struct fm10k_mbx_info *mbx = &hw->mbx;
 329	struct fm10k_mac_update mac_update;
 330	u32 msg[5];
 331
 332	/* clear set bit from VLAN ID */
 333	vid &= ~FM10K_VLAN_CLEAR;
 334
 335	/* if glort or VLAN are not valid return error */
 336	if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
 337		return FM10K_ERR_PARAM;
 338
 339	/* record fields */
 340	mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
 341						 ((u32)mac[3] << 16) |
 342						 ((u32)mac[4] << 8) |
 343						 ((u32)mac[5]));
 344	mac_update.mac_upper = cpu_to_le16(((u16)mac[0] << 8) |
 345					   ((u16)mac[1]));
 346	mac_update.vlan = cpu_to_le16(vid);
 347	mac_update.glort = cpu_to_le16(glort);
 348	mac_update.action = add ? 0 : 1;
 349	mac_update.flags = flags;
 350
 351	/* populate mac_update fields */
 352	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
 353	fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
 354				     &mac_update, sizeof(mac_update));
 355
 356	/* load onto outgoing mailbox */
 357	return mbx->ops.enqueue_tx(hw, mbx, msg);
 358}
 359
 360/**
 361 *  fm10k_update_uc_addr_pf - Update device unicast addresses
 362 *  @hw: pointer to the HW structure
 363 *  @glort: base resource tag for this request
 364 *  @mac: MAC address to add/remove from table
 365 *  @vid: VLAN ID to add/remove from table
 366 *  @add: Indicates if this is an add or remove operation
 367 *  @flags: flags field to indicate add and secure
 368 *
 369 *  This function is used to add or remove unicast addresses for
 370 *  the PF.
 371 **/
 372static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
 373				   const u8 *mac, u16 vid, bool add, u8 flags)
 374{
 375	/* verify MAC address is valid */
 376	if (!is_valid_ether_addr(mac))
 377		return FM10K_ERR_PARAM;
 378
 379	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
 380}
 381
 382/**
 383 *  fm10k_update_mc_addr_pf - Update device multicast addresses
 384 *  @hw: pointer to the HW structure
 385 *  @glort: base resource tag for this request
 386 *  @mac: MAC address to add/remove from table
 387 *  @vid: VLAN ID to add/remove from table
 388 *  @add: Indicates if this is an add or remove operation
 389 *
 390 *  This function is used to add or remove multicast MAC addresses for
 391 *  the PF.
 392 **/
 393static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
 394				   const u8 *mac, u16 vid, bool add)
 395{
 396	/* verify multicast address is valid */
 397	if (!is_multicast_ether_addr(mac))
 398		return FM10K_ERR_PARAM;
 399
 400	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
 401}
 402
 403/**
 404 *  fm10k_update_xcast_mode_pf - Request update of multicast mode
 405 *  @hw: pointer to hardware structure
 406 *  @glort: base resource tag for this request
 407 *  @mode: integer value indicating mode being requested
 408 *
 409 *  This function will attempt to request a higher mode for the port
 410 *  so that it can enable either multicast, multicast promiscuous, or
 411 *  promiscuous mode of operation.
 412 **/
 413static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
 414{
 415	struct fm10k_mbx_info *mbx = &hw->mbx;
 416	u32 msg[3], xcast_mode;
 417
 418	if (mode > FM10K_XCAST_MODE_NONE)
 419		return FM10K_ERR_PARAM;
 420
 421	/* if glort is not valid return error */
 422	if (!fm10k_glort_valid_pf(hw, glort))
 423		return FM10K_ERR_PARAM;
 424
 425	/* write xcast mode as a single u32 value,
 426	 * lower 16 bits: glort
 427	 * upper 16 bits: mode
 428	 */
 429	xcast_mode = ((u32)mode << 16) | glort;
 430
 431	/* generate message requesting to change xcast mode */
 432	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
 433	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
 434
 435	/* load onto outgoing mailbox */
 436	return mbx->ops.enqueue_tx(hw, mbx, msg);
 437}
 438
 439/**
 440 *  fm10k_update_int_moderator_pf - Update interrupt moderator linked list
 441 *  @hw: pointer to hardware structure
 442 *
 443 *  This function walks through the MSI-X vector table to determine the
 444 *  number of active interrupts and based on that information updates the
 445 *  interrupt moderator linked list.
 446 **/
 447static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
 448{
 449	u32 i;
 450
 451	/* Disable interrupt moderator */
 452	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
 453
 454	/* loop through PF from last to first looking enabled vectors */
 455	for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
 456		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
 457			break;
 458	}
 459
 460	/* always reset VFITR2[0] to point to last enabled PF vector */
 461	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
 462
 463	/* reset ITR2[0] to point to last enabled PF vector */
 464	if (!hw->iov.num_vfs)
 465		fm10k_write_reg(hw, FM10K_ITR2(0), i);
 466
 467	/* Enable interrupt moderator */
 468	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
 469}
 470
 471/**
 472 *  fm10k_update_lport_state_pf - Notify the switch of a change in port state
 473 *  @hw: pointer to the HW structure
 474 *  @glort: base resource tag for this request
 475 *  @count: number of logical ports being updated
 476 *  @enable: boolean value indicating enable or disable
 477 *
 478 *  This function is used to add/remove a logical port from the switch.
 479 **/
 480static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
 481				       u16 count, bool enable)
 482{
 483	struct fm10k_mbx_info *mbx = &hw->mbx;
 484	u32 msg[3], lport_msg;
 485
 486	/* do nothing if we are being asked to create or destroy 0 ports */
 487	if (!count)
 488		return 0;
 489
 490	/* if glort is not valid return error */
 491	if (!fm10k_glort_valid_pf(hw, glort))
 492		return FM10K_ERR_PARAM;
 493
 494	/* reset multicast mode if deleting lport */
 495	if (!enable)
 496		fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
 497
 498	/* construct the lport message from the 2 pieces of data we have */
 499	lport_msg = ((u32)count << 16) | glort;
 500
 501	/* generate lport create/delete message */
 502	fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
 503					 FM10K_PF_MSG_ID_LPORT_DELETE);
 504	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
 505
 506	/* load onto outgoing mailbox */
 507	return mbx->ops.enqueue_tx(hw, mbx, msg);
 508}
 509
 510/**
 511 *  fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
 512 *  @hw: pointer to hardware structure
 513 *  @dglort: pointer to dglort configuration structure
 514 *
 515 *  Reads the configuration structure contained in dglort_cfg and uses
 516 *  that information to then populate a DGLORTMAP/DEC entry and the queues
 517 *  to which it has been assigned.
 518 **/
 519static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
 520					 struct fm10k_dglort_cfg *dglort)
 521{
 522	u16 glort, queue_count, vsi_count, pc_count;
 523	u16 vsi, queue, pc, q_idx;
 524	u32 txqctl, dglortdec, dglortmap;
 525
 526	/* verify the dglort pointer */
 527	if (!dglort)
 528		return FM10K_ERR_PARAM;
 529
 530	/* verify the dglort values */
 531	if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
 532	    (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
 533	    (dglort->queue_l > 8) || (dglort->queue_b >= 256))
 534		return FM10K_ERR_PARAM;
 535
 536	/* determine count of VSIs and queues */
 537	queue_count = BIT(dglort->rss_l + dglort->pc_l);
 538	vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
 539	glort = dglort->glort;
 540	q_idx = dglort->queue_b;
 541
 542	/* configure SGLORT for queues */
 543	for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
 544		for (queue = 0; queue < queue_count; queue++, q_idx++) {
 545			if (q_idx >= FM10K_MAX_QUEUES)
 546				break;
 547
 548			fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
 549			fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
 550		}
 551	}
 552
 553	/* determine count of PCs and queues */
 554	queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
 555	pc_count = BIT(dglort->pc_l);
 556
 557	/* configure PC for Tx queues */
 558	for (pc = 0; pc < pc_count; pc++) {
 559		q_idx = pc + dglort->queue_b;
 560		for (queue = 0; queue < queue_count; queue++) {
 561			if (q_idx >= FM10K_MAX_QUEUES)
 562				break;
 563
 564			txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
 565			txqctl &= ~FM10K_TXQCTL_PC_MASK;
 566			txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
 567			fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);
 568
 569			q_idx += pc_count;
 570		}
 571	}
 572
 573	/* configure DGLORTDEC */
 574	dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
 575		    ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
 576		    ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
 577		    ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
 578		    ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
 579		    ((u32)(dglort->queue_l));
 580	if (dglort->inner_rss)
 581		dglortdec |=  FM10K_DGLORTDEC_INNERRSS_ENABLE;
 582
 583	/* configure DGLORTMAP */
 584	dglortmap = (dglort->idx == fm10k_dglort_default) ?
 585			FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
 586	dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
 587	dglortmap |= dglort->glort;
 588
 589	/* write values to hardware */
 590	fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
 591	fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
 592
 593	return 0;
 594}
 595
 596u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
 597{
 598	u16 num_pools = hw->iov.num_pools;
 599
 600	return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
 601	       8 : FM10K_MAX_QUEUES_POOL;
 602}
 603
 604u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
 605{
 606	u16 num_vfs = hw->iov.num_vfs;
 607	u16 vf_q_idx = FM10K_MAX_QUEUES;
 608
 609	vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
 610
 611	return vf_q_idx;
 612}
 613
 614static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
 615{
 616	u16 num_pools = hw->iov.num_pools;
 617
 618	return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
 619	       FM10K_MAX_VECTORS_POOL;
 620}
 621
 622static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
 623{
 624	u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
 625
 626	vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
 627
 628	return vf_v_idx;
 629}
 630
 631/**
 632 *  fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
 633 *  @hw: pointer to the HW structure
 634 *  @num_vfs: number of VFs to be allocated
 635 *  @num_pools: number of virtualization pools to be allocated
 636 *
 637 *  Allocates queues and traffic classes to virtualization entities to prepare
 638 *  the PF for SR-IOV and VMDq
 639 **/
 640static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
 641					 u16 num_pools)
 642{
 643	u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
 644	u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
 645	int i, j;
 646
 647	/* hardware only supports up to 64 pools */
 648	if (num_pools > 64)
 649		return FM10K_ERR_PARAM;
 650
 651	/* the number of VFs cannot exceed the number of pools */
 652	if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
 653		return FM10K_ERR_PARAM;
 654
 655	/* record number of virtualization entities */
 656	hw->iov.num_vfs = num_vfs;
 657	hw->iov.num_pools = num_pools;
 658
 659	/* determine qmap offsets and counts */
 660	qmap_stride = (num_vfs > 8) ? 32 : 256;
 661	qpp = fm10k_queues_per_pool(hw);
 662	vpp = fm10k_vectors_per_pool(hw);
 663
 664	/* calculate starting index for queues */
 665	vf_q_idx = fm10k_vf_queue_index(hw, 0);
 666	qmap_idx = 0;
 667
 668	/* establish TCs with -1 credits and no quanta to prevent transmit */
 669	for (i = 0; i < num_vfs; i++) {
 670		fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0);
 671		fm10k_write_reg(hw, FM10K_TC_RATE(i), 0);
 672		fm10k_write_reg(hw, FM10K_TC_CREDIT(i),
 673				FM10K_TC_CREDIT_CREDIT_MASK);
 674	}
 675
 676	/* zero out all mbmem registers */
 677	for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
 678		fm10k_write_reg(hw, FM10K_MBMEM(i), 0);
 679
 680	/* clear event notification of VF FLR */
 681	fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0);
 682	fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0);
 683
 684	/* loop through unallocated rings assigning them back to PF */
 685	for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
 686		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
 687		fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
 688				FM10K_TXQCTL_UNLIMITED_BW | vid);
 689		fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
 690	}
 691
 692	/* PF should have already updated VFITR2[0] */
 693
 694	/* update all ITR registers to flow to VFITR2[0] */
 695	for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
 696		if (!(i & (vpp - 1)))
 697			fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp);
 698		else
 699			fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
 700	}
 701
 702	/* update PF ITR2[0] to reference the last vector */
 703	fm10k_write_reg(hw, FM10K_ITR2(0),
 704			fm10k_vf_vector_index(hw, num_vfs - 1));
 705
 706	/* loop through rings populating rings and TCs */
 707	for (i = 0; i < num_vfs; i++) {
 708		/* record index for VF queue 0 for use in end of loop */
 709		vf_q_idx0 = vf_q_idx;
 710
 711		for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
 712			/* assign VF and locked TC to queues */
 713			fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
 714			fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx),
 715					(i << FM10K_TXQCTL_TC_SHIFT) | i |
 716					FM10K_TXQCTL_VF | vid);
 717			fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx),
 718					FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
 719					FM10K_RXDCTL_DROP_ON_EMPTY);
 720			fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
 721					(i << FM10K_RXQCTL_VF_SHIFT) |
 722					FM10K_RXQCTL_VF);
 723
 724			/* map queue pair to VF */
 725			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
 726			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
 727		}
 728
 729		/* repeat the first ring for all of the remaining VF rings */
 730		for (; j < qmap_stride; j++, qmap_idx++) {
 731			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
 732			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
 733		}
 734	}
 735
 736	/* loop through remaining indexes assigning all to queue 0 */
 737	while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
 738		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
 739		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0);
 740		qmap_idx++;
 741	}
 742
 743	return 0;
 744}
 745
 746/**
 747 *  fm10k_iov_configure_tc_pf - Configure the shaping group for VF
 748 *  @hw: pointer to the HW structure
 749 *  @vf_idx: index of VF receiving GLORT
 750 *  @rate: Rate indicated in Mb/s
 751 *
 752 *  Configured the TC for a given VF to allow only up to a given number
 753 *  of Mb/s of outgoing Tx throughput.
 754 **/
 755static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
 756{
 757	/* configure defaults */
 758	u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
 759	u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
 760
 761	/* verify vf is in range */
 762	if (vf_idx >= hw->iov.num_vfs)
 763		return FM10K_ERR_PARAM;
 764
 765	/* set interval to align with 4.096 usec in all modes */
 766	switch (hw->bus.speed) {
 767	case fm10k_bus_speed_2500:
 768		interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
 769		break;
 770	case fm10k_bus_speed_5000:
 771		interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
 772		break;
 773	default:
 774		break;
 775	}
 776
 777	if (rate) {
 778		if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
 779			return FM10K_ERR_PARAM;
 780
 781		/* The quanta is measured in Bytes per 4.096 or 8.192 usec
 782		 * The rate is provided in Mbits per second
 783		 * To tralslate from rate to quanta we need to multiply the
 784		 * rate by 8.192 usec and divide by 8 bits/byte.  To avoid
 785		 * dealing with floating point we can round the values up
 786		 * to the nearest whole number ratio which gives us 128 / 125.
 787		 */
 788		tc_rate = (rate * 128) / 125;
 789
 790		/* try to keep the rate limiting accurate by increasing
 791		 * the number of credits and interval for rates less than 4Gb/s
 792		 */
 793		if (rate < 4000)
 794			interval <<= 1;
 795		else
 796			tc_rate >>= 1;
 797	}
 798
 799	/* update rate limiter with new values */
 800	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
 801	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
 802	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
 803
 804	return 0;
 805}
 806
 807/**
 808 *  fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
 809 *  @hw: pointer to the HW structure
 810 *  @vf_idx: index of VF receiving GLORT
 811 *
 812 *  Update the interrupt moderator linked list to include any MSI-X
 813 *  interrupts which the VF has enabled in the MSI-X vector table.
 814 **/
 815static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
 816{
 817	u16 vf_v_idx, vf_v_limit, i;
 818
 819	/* verify vf is in range */
 820	if (vf_idx >= hw->iov.num_vfs)
 821		return FM10K_ERR_PARAM;
 822
 823	/* determine vector offset and count */
 824	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
 825	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 826
 827	/* search for first vector that is not masked */
 828	for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
 829		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
 830			break;
 831	}
 832
 833	/* reset linked list so it now includes our active vectors */
 834	if (vf_idx == (hw->iov.num_vfs - 1))
 835		fm10k_write_reg(hw, FM10K_ITR2(0), i);
 836	else
 837		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i);
 838
 839	return 0;
 840}
 841
 842/**
 843 *  fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
 844 *  @hw: pointer to the HW structure
 845 *  @vf_info: pointer to VF information structure
 846 *
 847 *  Assign a MAC address and default VLAN to a VF and notify it of the update
 848 **/
 849static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
 850						struct fm10k_vf_info *vf_info)
 851{
 852	u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
 853	u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
 854	s32 err = 0;
 855	u16 vf_idx, vf_vid;
 856
 857	/* verify vf is in range */
 858	if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
 859		return FM10K_ERR_PARAM;
 860
 861	/* determine qmap offsets and counts */
 862	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
 863	queues_per_pool = fm10k_queues_per_pool(hw);
 864
 865	/* calculate starting index for queues */
 866	vf_idx = vf_info->vf_idx;
 867	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
 868	qmap_idx = qmap_stride * vf_idx;
 869
 870	/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
 871	 * used here to indicate to the VF that it will not have privilege to
 872	 * write VLAN_TABLE. All policy is enforced on the PF but this allows
 873	 * the VF to correctly report errors to userspace rqeuests.
 874	 */
 875	if (vf_info->pf_vid)
 876		vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
 877	else
 878		vf_vid = vf_info->sw_vid;
 879
 880	/* generate MAC_ADDR request */
 881	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
 882	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
 883				    vf_info->mac, vf_vid);
 884
 885	/* Configure Queue control register with new VLAN ID. The TXQCTL
 886	 * register is RO from the VF, so the PF must do this even in the
 887	 * case of notifying the VF of a new VID via the mailbox.
 888	 */
 889	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
 890		 FM10K_TXQCTL_VID_MASK;
 891	txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
 892		  FM10K_TXQCTL_VF | vf_idx;
 893
 894	for (i = 0; i < queues_per_pool; i++)
 895		fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
 896
 897	/* try loading a message onto outgoing mailbox first */
 898	if (vf_info->mbx.ops.enqueue_tx) {
 899		err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
 900		if (err != FM10K_MBX_ERR_NO_MBX)
 901			return err;
 902		err = 0;
 903	}
 904
 905	/* If we aren't connected to a mailbox, this is most likely because
 906	 * the VF driver is not running. It should thus be safe to re-map
 907	 * queues and use the registers to pass the MAC address so that the VF
 908	 * driver gets correct information during its initialization.
 909	 */
 910
 911	/* MAP Tx queue back to 0 temporarily, and disable it */
 912	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
 913	fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
 914
 915	/* verify ring has disabled before modifying base address registers */
 916	txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
 917	for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
 918		/* limit ourselves to a 1ms timeout */
 919		if (timeout == 10) {
 920			err = FM10K_ERR_DMA_PENDING;
 921			goto err_out;
 922		}
 923
 924		usleep_range(100, 200);
 925		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
 926	}
 927
 928	/* Update base address registers to contain MAC address */
 929	if (is_valid_ether_addr(vf_info->mac)) {
 930		tdbal = (((u32)vf_info->mac[3]) << 24) |
 931			(((u32)vf_info->mac[4]) << 16) |
 932			(((u32)vf_info->mac[5]) << 8);
 933
 934		tdbah = (((u32)0xFF)	        << 24) |
 935			(((u32)vf_info->mac[0]) << 16) |
 936			(((u32)vf_info->mac[1]) << 8) |
 937			((u32)vf_info->mac[2]);
 938	}
 939
 940	/* Record the base address into queue 0 */
 941	fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal);
 942	fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah);
 943
 944	/* Provide the VF the ITR scale, using software-defined fields in TDLEN
 945	 * to pass the information during VF initialization. See definition of
 946	 * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
 947	 */
 948	fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
 949						   FM10K_TDLEN_ITR_SCALE_SHIFT);
 950
 951err_out:
 952	/* restore the queue back to VF ownership */
 953	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
 954	return err;
 955}
 956
 957/**
 958 *  fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
 959 *  @hw: pointer to the HW structure
 960 *  @vf_info: pointer to VF information structure
 961 *
 962 *  Reassign the interrupts and queues to a VF following an FLR
 963 **/
 964static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
 965					struct fm10k_vf_info *vf_info)
 966{
 967	u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
 968	u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
 969	u16 vf_v_idx, vf_v_limit, vf_vid;
 970	u8 vf_idx = vf_info->vf_idx;
 971	int i;
 972
 973	/* verify vf is in range */
 974	if (vf_idx >= hw->iov.num_vfs)
 975		return FM10K_ERR_PARAM;
 976
 977	/* clear event notification of VF FLR */
 978	fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
 979
 980	/* force timeout and then disconnect the mailbox */
 981	vf_info->mbx.timeout = 0;
 982	if (vf_info->mbx.ops.disconnect)
 983		vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
 984
 985	/* determine vector offset and count */
 986	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
 987	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
 988
 989	/* determine qmap offsets and counts */
 990	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
 991	queues_per_pool = fm10k_queues_per_pool(hw);
 992	qmap_idx = qmap_stride * vf_idx;
 993
 994	/* make all the queues inaccessible to the VF */
 995	for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
 996		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
 997		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
 998	}
 999
1000	/* calculate starting index for queues */
1001	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
1002
1003	/* determine correct default VLAN ID */
1004	if (vf_info->pf_vid)
1005		vf_vid = vf_info->pf_vid;
1006	else
1007		vf_vid = vf_info->sw_vid;
1008
1009	/* configure Queue control register */
1010	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
1011		 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
1012		 FM10K_TXQCTL_VF | vf_idx;
1013	rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
1014
1015	/* stop further DMA and reset queue ownership back to VF */
1016	for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
1017		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
1018		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
1019		fm10k_write_reg(hw, FM10K_RXDCTL(i),
1020				FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
1021				FM10K_RXDCTL_DROP_ON_EMPTY);
1022		fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl);
1023	}
1024
1025	/* reset TC with -1 credits and no quanta to prevent transmit */
1026	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
1027	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0);
1028	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx),
1029			FM10K_TC_CREDIT_CREDIT_MASK);
1030
1031	/* update our first entry in the table based on previous VF */
1032	if (!vf_idx)
1033		hw->mac.ops.update_int_moderator(hw);
1034	else
1035		hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
1036
1037	/* reset linked list so it now includes our active vectors */
1038	if (vf_idx == (hw->iov.num_vfs - 1))
1039		fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx);
1040	else
1041		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
1042
1043	/* link remaining vectors so that next points to previous */
1044	for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
1045		fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
1046
1047	/* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
1048	for (i = FM10K_VFMBMEM_LEN; i--;)
1049		fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
1050	for (i = FM10K_VLAN_TABLE_SIZE; i--;)
1051		fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
1052	for (i = FM10K_RETA_SIZE; i--;)
1053		fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0);
1054	for (i = FM10K_RSSRK_SIZE; i--;)
1055		fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
1056	fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0);
1057
1058	/* Update base address registers to contain MAC address */
1059	if (is_valid_ether_addr(vf_info->mac)) {
1060		tdbal = (((u32)vf_info->mac[3]) << 24) |
1061			(((u32)vf_info->mac[4]) << 16) |
1062			(((u32)vf_info->mac[5]) << 8);
1063		tdbah = (((u32)0xFF)	   << 24) |
1064			(((u32)vf_info->mac[0]) << 16) |
1065			(((u32)vf_info->mac[1]) << 8) |
1066			((u32)vf_info->mac[2]);
1067	}
1068
1069	/* map queue pairs back to VF from last to first */
1070	for (i = queues_per_pool; i--;) {
1071		fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
1072		fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
1073		/* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
1074		 * explanation of how TDLEN is used.
1075		 */
1076		fm10k_write_reg(hw, FM10K_TDLEN(vf_q_idx + i),
1077				hw->mac.itr_scale <<
1078				FM10K_TDLEN_ITR_SCALE_SHIFT);
1079		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
1080		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
1081	}
1082
1083	/* repeat the first ring for all the remaining VF rings */
1084	for (i = queues_per_pool; i < qmap_stride; i++) {
1085		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
1086		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
1087	}
1088
1089	return 0;
1090}
1091
1092/**
1093 *  fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
1094 *  @hw: pointer to hardware structure
1095 *  @vf_info: pointer to VF information structure
1096 *  @lport_idx: Logical port offset from the hardware glort
1097 *  @flags: Set of capability flags to extend port beyond basic functionality
1098 *
1099 *  This function allows enabling a VF port by assigning it a GLORT and
1100 *  setting the flags so that it can enable an Rx mode.
1101 **/
1102static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
1103				  struct fm10k_vf_info *vf_info,
1104				  u16 lport_idx, u8 flags)
1105{
1106	u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
1107
1108	/* if glort is not valid return error */
1109	if (!fm10k_glort_valid_pf(hw, glort))
1110		return FM10K_ERR_PARAM;
1111
1112	vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
1113	vf_info->glort = glort;
1114
1115	return 0;
1116}
1117
1118/**
1119 *  fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
1120 *  @hw: pointer to hardware structure
1121 *  @vf_info: pointer to VF information structure
1122 *
1123 *  This function disables a VF port by stripping it of a GLORT and
1124 *  setting the flags so that it cannot enable any Rx mode.
1125 **/
1126static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
1127				     struct fm10k_vf_info *vf_info)
1128{
1129	u32 msg[1];
1130
1131	/* need to disable the port if it is already enabled */
1132	if (FM10K_VF_FLAG_ENABLED(vf_info)) {
1133		/* notify switch that this port has been disabled */
1134		fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
1135
1136		/* generate port state response to notify VF it is not ready */
1137		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1138		vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
1139	}
1140
1141	/* clear flags and glort if it exists */
1142	vf_info->vf_flags = 0;
1143	vf_info->glort = 0;
1144}
1145
1146/**
1147 *  fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
1148 *  @hw: pointer to hardware structure
1149 *  @q: stats for all queues of a VF
1150 *  @vf_idx: index of VF
1151 *
1152 *  This function collects queue stats for VFs.
1153 **/
1154static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
1155				      struct fm10k_hw_stats_q *q,
1156				      u16 vf_idx)
1157{
1158	u32 idx, qpp;
1159
1160	/* get stats for all of the queues */
1161	qpp = fm10k_queues_per_pool(hw);
1162	idx = fm10k_vf_queue_index(hw, vf_idx);
1163	fm10k_update_hw_stats_q(hw, q, idx, qpp);
1164}
1165
1166/**
1167 *  fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
1168 *  @hw: Pointer to hardware structure
1169 *  @results: Pointer array to message, results[0] is pointer to message
1170 *  @mbx: Pointer to mailbox information structure
1171 *
1172 *  This function is a default handler for MSI-X requests from the VF.  The
1173 *  assumption is that in this case it is acceptable to just directly
1174 *  hand off the message from the VF to the underlying shared code.
1175 **/
1176s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
1177			  struct fm10k_mbx_info *mbx)
1178{
1179	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1180	u8 vf_idx = vf_info->vf_idx;
1181
1182	return hw->iov.ops.assign_int_moderator(hw, vf_idx);
1183}
1184
1185/**
1186 * fm10k_iov_select_vid - Select correct default VLAN ID
1187 * @hw: Pointer to hardware structure
1188 * @vid: VLAN ID to correct
1189 *
1190 * Will report an error if the VLAN ID is out of range. For VID = 0, it will
1191 * return either the pf_vid or sw_vid depending on which one is set.
1192 */
1193static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
1194{
1195	if (!vid)
1196		return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
1197	else if (vf_info->pf_vid && vid != vf_info->pf_vid)
1198		return FM10K_ERR_PARAM;
1199	else
1200		return vid;
1201}
1202
1203/**
1204 *  fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
1205 *  @hw: Pointer to hardware structure
1206 *  @results: Pointer array to message, results[0] is pointer to message
1207 *  @mbx: Pointer to mailbox information structure
1208 *
1209 *  This function is a default handler for MAC/VLAN requests from the VF.
1210 *  The assumption is that in this case it is acceptable to just directly
1211 *  hand off the message from the VF to the underlying shared code.
1212 **/
1213s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1214			      struct fm10k_mbx_info *mbx)
1215{
1216	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1217	u8 mac[ETH_ALEN];
1218	u32 *result;
1219	int err = 0;
1220	bool set;
1221	u16 vlan;
1222	u32 vid;
1223
1224	/* we shouldn't be updating rules on a disabled interface */
1225	if (!FM10K_VF_FLAG_ENABLED(vf_info))
1226		err = FM10K_ERR_PARAM;
1227
1228	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
1229		result = results[FM10K_MAC_VLAN_MSG_VLAN];
1230
1231		/* record VLAN id requested */
1232		err = fm10k_tlv_attr_get_u32(result, &vid);
1233		if (err)
1234			return err;
1235
1236		set = !(vid & FM10K_VLAN_CLEAR);
1237		vid &= ~FM10K_VLAN_CLEAR;
1238
1239		/* if the length field has been set, this is a multi-bit
1240		 * update request. For multi-bit requests, simply disallow
1241		 * them when the pf_vid has been set. In this case, the PF
1242		 * should have already cleared the VLAN_TABLE, and if we
1243		 * allowed them, it could allow a rogue VF to receive traffic
1244		 * on a VLAN it was not assigned. In the single-bit case, we
1245		 * need to modify requests for VLAN 0 to use the default PF or
1246		 * SW vid when assigned.
1247		 */
1248
1249		if (vid >> 16) {
1250			/* prevent multi-bit requests when PF has
1251			 * administratively set the VLAN for this VF
1252			 */
1253			if (vf_info->pf_vid)
1254				return FM10K_ERR_PARAM;
1255		} else {
1256			err = fm10k_iov_select_vid(vf_info, (u16)vid);
1257			if (err < 0)
1258				return err;
1259
1260			vid = err;
1261		}
1262
1263		/* update VSI info for VF in regards to VLAN table */
1264		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
1265	}
1266
1267	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
1268		result = results[FM10K_MAC_VLAN_MSG_MAC];
1269
1270		/* record unicast MAC address requested */
1271		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1272		if (err)
1273			return err;
1274
1275		/* block attempts to set MAC for a locked device */
1276		if (is_valid_ether_addr(vf_info->mac) &&
1277		    !ether_addr_equal(mac, vf_info->mac))
1278			return FM10K_ERR_PARAM;
1279
1280		set = !(vlan & FM10K_VLAN_CLEAR);
1281		vlan &= ~FM10K_VLAN_CLEAR;
1282
1283		err = fm10k_iov_select_vid(vf_info, vlan);
1284		if (err < 0)
1285			return err;
1286
1287		vlan = (u16)err;
1288
1289		/* notify switch of request for new unicast address */
1290		err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
1291						 mac, vlan, set, 0);
1292	}
1293
1294	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
1295		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
1296
1297		/* record multicast MAC address requested */
1298		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1299		if (err)
1300			return err;
1301
1302		/* verify that the VF is allowed to request multicast */
1303		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
1304			return FM10K_ERR_PARAM;
1305
1306		set = !(vlan & FM10K_VLAN_CLEAR);
1307		vlan &= ~FM10K_VLAN_CLEAR;
1308
1309		err = fm10k_iov_select_vid(vf_info, vlan);
1310		if (err < 0)
1311			return err;
1312
1313		vlan = (u16)err;
1314
1315		/* notify switch of request for new multicast address */
1316		err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
1317						 mac, vlan, set);
1318	}
1319
1320	return err;
1321}
1322
1323/**
1324 *  fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
1325 *  @vf_info: VF info structure containing capability flags
1326 *  @mode: Requested xcast mode
1327 *
1328 *  This function outputs the mode that most closely matches the requested
1329 *  mode.  If not modes match it will request we disable the port
1330 **/
1331static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
1332					    u8 mode)
1333{
1334	u8 vf_flags = vf_info->vf_flags;
1335
1336	/* match up mode to capabilities as best as possible */
1337	switch (mode) {
1338	case FM10K_XCAST_MODE_PROMISC:
1339		if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
1340			return FM10K_XCAST_MODE_PROMISC;
1341		/* fallthough */
1342	case FM10K_XCAST_MODE_ALLMULTI:
1343		if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
1344			return FM10K_XCAST_MODE_ALLMULTI;
1345		/* fallthough */
1346	case FM10K_XCAST_MODE_MULTI:
1347		if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
1348			return FM10K_XCAST_MODE_MULTI;
1349		/* fallthough */
1350	case FM10K_XCAST_MODE_NONE:
1351		if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
1352			return FM10K_XCAST_MODE_NONE;
1353		/* fallthrough */
1354	default:
1355		break;
1356	}
1357
1358	/* disable interface as it should not be able to request any xcast mode */
1359	return FM10K_XCAST_MODE_DISABLE;
1360}
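/* Illustrative example (not part of the driver): because every case above
 * falls through to the next, less privileged mode, the function demotes a
 * request to the best mode the VF is actually capable of.  For a
 * hypothetical VF whose only capability flag is
 * FM10K_VF_FLAG_MULTI_CAPABLE, a request for FM10K_XCAST_MODE_PROMISC is
 * resolved as:
 *
 *	PROMISC  -> not capable, fall through
 *	ALLMULTI -> not capable, fall through
 *	MULTI    -> capable, return FM10K_XCAST_MODE_MULTI
 *
 * Only when no capability flag matches does control fall out of the
 * switch and return FM10K_XCAST_MODE_DISABLE.
 */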
1361
1362/**
1363 *  fm10k_iov_msg_lport_state_pf - Message handler for port state requests
1364 *  @hw: Pointer to hardware structure
1365 *  @results: pointer array containing parsed data
1366 *  @mbx: Pointer to mailbox information structure
1367 *
1368 *  This function is a default handler for port state requests.  The port
1369 *  state requests for now are basic and consist of enabling or disabling
1370 *  the port.
1371 **/
1372s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
1373				 struct fm10k_mbx_info *mbx)
1374{
1375	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1376	u32 *result;
1377	s32 err = 0;
1378	u32 msg[2];
1379	u8 mode = 0;
1380
1381	/* verify VF is allowed to enable even minimal mode */
1382	if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
1383		return FM10K_ERR_PARAM;
1384
1385	if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
1386		result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
1387
1388		/* XCAST mode update requested */
1389		err = fm10k_tlv_attr_get_u8(result, &mode);
1390		if (err)
1391			return FM10K_ERR_PARAM;
1392
1393		/* prep for possible demotion depending on capabilities */
1394		mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
1395
1396		/* if mode is not currently enabled, enable it */
1397		if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
1398			fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
1399
1400		/* swap mode back to a bit flag */
1401		mode = FM10K_VF_FLAG_SET_MODE(mode);
1402	} else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
1403		/* need to disable the port if it is already enabled */
1404		if (FM10K_VF_FLAG_ENABLED(vf_info))
1405			err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1406							  1, false);
1407
1408		/* we need to clear VF_FLAG_ENABLED flags in order to ensure
1409		 * that we actually re-enable the LPORT state below. Note that
1410		 * this has no impact if the VF is already disabled, as the
1411		 * flags are already cleared.
1412		 */
1413		if (!err)
1414			vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
1415
1416		/* when enabling the port we should reset the rate limiters */
1417		hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
1418
1419		/* set mode for minimal functionality */
1420		mode = FM10K_VF_FLAG_SET_MODE_NONE;
1421
1422		/* generate port state response to notify VF it is ready */
1423		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1424		fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
1425		mbx->ops.enqueue_tx(hw, mbx, msg);
1426	}
1427
1428	/* if the enabled state is changing (boolean XOR of old and new), notify the switch */
1429	if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
1430		err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
1431						  !!mode);
1432
1433	/* if state change succeeded, then update our stored state */
1434	mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
1435	if (!err)
1436		vf_info->vf_flags = mode;
1437
1438	return err;
1439}
1440
1441/**
1442 *  fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
1443 *  @hw: pointer to hardware structure
1444 *  @stats: pointer to the stats structure to update
1445 *
1446 *  This function collects and aggregates global and per queue hardware
1447 *  statistics.
1448 **/
1449static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
1450				     struct fm10k_hw_stats *stats)
1451{
1452	u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
1453	u32 id, id_prev;
1454
1455	/* Use Tx queue 0 as a canary to detect a reset */
1456	id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
1457
1458	/* Read Global Statistics */
1459	do {
1460		timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
1461						  &stats->timeout);
1462		ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
1463		ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
1464		um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
1465		xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
1466		vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
1467						    &stats->vlan_drop);
1468		loopback_drop =
1469			fm10k_read_hw_stats_32b(hw,
1470						FM10K_STATS_LOOPBACK_DROP,
1471						&stats->loopback_drop);
1472		nodesc_drop = fm10k_read_hw_stats_32b(hw,
1473						      FM10K_STATS_NODESC_DROP,
1474						      &stats->nodesc_drop);
1475
1476		/* if value has not changed then we have consistent data */
1477		id_prev = id;
1478		id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
1479	} while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
1480
1481	/* drop non-ID bits and set VALID ID bit */
1482	id &= FM10K_TXQCTL_ID_MASK;
1483	id |= FM10K_STAT_VALID;
1484
1485	/* Update Global Statistics */
1486	if (stats->stats_idx == id) {
1487		stats->timeout.count += timeout;
1488		stats->ur.count += ur;
1489		stats->ca.count += ca;
1490		stats->um.count += um;
1491		stats->xec.count += xec;
1492		stats->vlan_drop.count += vlan_drop;
1493		stats->loopback_drop.count += loopback_drop;
1494		stats->nodesc_drop.count += nodesc_drop;
1495	}
1496
1497	/* Update bases and record current PF id */
1498	fm10k_update_hw_base_32b(&stats->timeout, timeout);
1499	fm10k_update_hw_base_32b(&stats->ur, ur);
1500	fm10k_update_hw_base_32b(&stats->ca, ca);
1501	fm10k_update_hw_base_32b(&stats->um, um);
1502	fm10k_update_hw_base_32b(&stats->xec, xec);
1503	fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
1504	fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
1505	fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
1506	stats->stats_idx = id;
1507
1508	/* Update Queue Statistics */
1509	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
1510}
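/* Illustrative sketch (not part of the driver): the statistics reads above
 * use the owner ID field of FM10K_TXQCTL(0) as a canary; if the field
 * changes between two samples, a reset likely occurred mid-read and the
 * counters are read again.  The generic shape of the pattern, with
 * hypothetical helpers:
 *
 *	id = read_canary();
 *	do {
 *		read_all_counters();
 *		id_prev = id;
 *		id = read_canary();
 *	} while ((id ^ id_prev) & ID_MASK);
 *
 * Once the loop exits, all counters were read within a single stable
 * interval and can safely be folded into the running totals.
 */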
1511
1512/**
1513 *  fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
1514 *  @hw: pointer to hardware structure
1515 *  @stats: pointer to the stats structure to update
1516 *
1517 *  This function resets the base for global and per queue hardware
1518 *  statistics.
1519 **/
1520static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
1521				     struct fm10k_hw_stats *stats)
1522{
1523	/* Unbind Global Statistics */
1524	fm10k_unbind_hw_stats_32b(&stats->timeout);
1525	fm10k_unbind_hw_stats_32b(&stats->ur);
1526	fm10k_unbind_hw_stats_32b(&stats->ca);
1527	fm10k_unbind_hw_stats_32b(&stats->um);
1528	fm10k_unbind_hw_stats_32b(&stats->xec);
1529	fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
1530	fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
1531	fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
1532
1533	/* Unbind Queue Statistics */
1534	fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
1535
1536	/* Reinitialize bases for all stats */
1537	fm10k_update_hw_stats_pf(hw, stats);
1538}
1539
1540/**
1541 *  fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
1542 *  @hw: pointer to hardware structure
1543 *  @dma_mask: 64 bit DMA mask required for platform
1544 *
1545 *  This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
1546 *  to limit the access to memory beyond what is physically in the system.
1547 **/
1548static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
1549{
1550	/* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
1551	u32 phyaddr = (u32)(dma_mask >> 32);
1552
1553	fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr);
1554}
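/* Illustrative worked example (not part of the driver): for a 48-bit DMA
 * mask, DMA_BIT_MASK(48) is 0x0000FFFFFFFFFFFF, so the value written to
 * FM10K_PHYADDR is its upper 32 bits, 0x0000FFFF, which bounds the
 * physical address space the endpoint may target.
 */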
1555
1556/**
1557 *  fm10k_get_fault_pf - Record a fault in one of the interface units
1558 *  @hw: pointer to hardware structure
1559 *  @type: fault type register offset
1560 *  @fault: pointer to memory location to record the fault
1561 *
1562 *  Record the fault register contents to the fault data structure and
1563 *  clear the entry from the register.
1564 *
1565 *  Returns FM10K_ERR_PARAM if an invalid register is specified or no fault is present.
1566 **/
1567static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
1568			      struct fm10k_fault *fault)
1569{
1570	u32 func;
1571
1572	/* verify the fault register is in range and is aligned */
1573	switch (type) {
1574	case FM10K_PCA_FAULT:
1575	case FM10K_THI_FAULT:
1576	case FM10K_FUM_FAULT:
1577		break;
1578	default:
1579		return FM10K_ERR_PARAM;
1580	}
1581
1582	/* only service faults that are valid */
1583	func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC);
1584	if (!(func & FM10K_FAULT_FUNC_VALID))
1585		return FM10K_ERR_PARAM;
1586
1587	/* read remaining fields */
1588	fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
1589	fault->address <<= 32;
1590	fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
1591	fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);
1592
1593	/* clear valid bit to allow for next error */
1594	fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
1595
1596	/* Record which function triggered the error */
1597	if (func & FM10K_FAULT_FUNC_PF)
1598		fault->func = 0;
1599	else
1600		fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
1601				   FM10K_FAULT_FUNC_VF_SHIFT);
1602
1603	/* record fault type */
1604	fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
1605
1606	return 0;
1607}
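/* Illustrative usage sketch (not part of the driver): a caller servicing a
 * fault interrupt might drain one unit at a time through the ops table
 * installed below, roughly:
 *
 *	struct fm10k_fault fault;
 *
 *	if (!hw->mac.ops.get_fault(hw, FM10K_PCA_FAULT, &fault))
 *		pr_warn("fault addr 0x%llx type %u func %u\n",
 *			fault.address, fault.type, fault.func);
 *
 * A non-zero return means either an unsupported unit was requested or no
 * valid fault is currently latched for that unit.
 */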
1608
1609/**
1610 *  fm10k_request_lport_map_pf - Request LPORT map from the switch API
1611 *  @hw: pointer to hardware structure
1612 *
1613 **/
1614static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
1615{
1616	struct fm10k_mbx_info *mbx = &hw->mbx;
1617	u32 msg[1];
1618
1619	/* issue request asking for LPORT map */
1620	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
1621
1622	/* load onto outgoing mailbox */
1623	return mbx->ops.enqueue_tx(hw, mbx, msg);
1624}
1625
1626/**
1627 *  fm10k_get_host_state_pf - Returns the state of the switch and mailbox
1628 *  @hw: pointer to hardware structure
1629 *  @switch_ready: pointer to boolean value that will record switch state
1630 *
1631 *  This function will check the DMA_CTRL2 register and mailbox in order
1632 *  to determine if the switch is ready for the PF to begin requesting
1633 *  addresses and mapping traffic to the local interface.
1634 **/
1635static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
1636{
1637	u32 dma_ctrl2;
1638
1639	/* verify the switch is ready for interaction */
1640	dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
1641	if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
1642		return 0;
1643
1644	/* retrieve generic host state info */
1645	return fm10k_get_host_state_generic(hw, switch_ready);
1646}
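/* Illustrative note (not part of the driver): callers typically poll this
 * through the ops table until the switch manager reports ready, e.g.:
 *
 *	bool switch_ready = false;
 *
 *	err = hw->mac.ops.get_host_state(hw, &switch_ready);
 *	if (!err && switch_ready)
 *		begin_lport_map_and_address_requests();	// hypothetical
 *
 * When FM10K_DMA_CTRL2_SWITCH_READY is not yet set, the function returns
 * 0 without touching *switch_ready, so the caller simply retries later.
 */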
1647
1648/* This structure defines the attributes to be parsed below */
1649const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
1650	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1651				 sizeof(struct fm10k_swapi_error)),
1652	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
1653	FM10K_TLV_ATTR_LAST
1654};
1655
1656/**
1657 *  fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
1658 *  @hw: Pointer to hardware structure
1659 *  @results: pointer array containing parsed data
1660 *  @mbx: Pointer to mailbox information structure
1661 *
1662 *  This handler configures the lport mapping based on the reply from the
1663 *  switch API.
1664 **/
1665s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
1666			   struct fm10k_mbx_info *mbx)
1667{
1668	u16 glort, mask;
1669	u32 dglort_map;
1670	s32 err;
1671
1672	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
1673				     &dglort_map);
1674	if (err)
1675		return err;
1676
1677	/* extract values out of the header */
1678	glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
1679	mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
1680
1681	/* verify mask is set and none of the masked bits in glort are set */
1682	if (!mask || (glort & ~mask))
1683		return FM10K_ERR_PARAM;
1684
1685	/* verify the mask is contiguous, and that it is 1's followed by 0's */
1686	if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
1687		return FM10K_ERR_PARAM;
1688
1689	/* record the glort, mask, and port count */
1690	hw->mac.dglort_map = dglort_map;
1691
1692	return 0;
1693}
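/* Illustrative worked example (not part of the driver): the contiguity
 * check above uses the identity that ~(mask - 1) & mask isolates the
 * lowest set bit of a non-zero mask.  For a contiguous mask such as
 * 0xFF00, that bit is 0x0100 and 0x0100 + 0xFF00 = 0x10000, a single
 * clean carry just above the run of 1's.  For a non-contiguous mask such
 * as 0xF0F0, the lowest set bit is 0x0010 and the sum is 0xF100, which
 * still leaves bits inside the 16-bit glort space.  The sum is then
 * screened against FM10K_DGLORTMAP_NONE before the map is stored.
 */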
1694
1695const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
1696	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
1697	FM10K_TLV_ATTR_LAST
1698};
1699
1700/**
1701 *  fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
1702 *  @hw: Pointer to hardware structure
1703 *  @results: pointer array containing parsed data
1704 *  @mbx: Pointer to mailbox information structure
1705 *
1706 *  This handler configures the default VLAN for the PF
1707 **/
1708static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
1709				    struct fm10k_mbx_info *mbx)
1710{
1711	u16 glort, pvid;
1712	u32 pvid_update;
1713	s32 err;
1714
1715	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1716				     &pvid_update);
1717	if (err)
1718		return err;
1719
1720	/* extract values from the pvid update */
1721	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1722	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1723
1724	/* if glort is not valid return error */
1725	if (!fm10k_glort_valid_pf(hw, glort))
1726		return FM10K_ERR_PARAM;
1727
1728	/* verify VLAN ID is valid */
1729	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1730		return FM10K_ERR_PARAM;
1731
1732	/* record the port VLAN ID value */
1733	hw->mac.default_vid = pvid;
1734
1735	return 0;
1736}
1737
1738/**
1739 *  fm10k_record_global_table_data - Move global table data to swapi table info
1740 *  @from: pointer to source table data structure
1741 *  @to: pointer to destination table info structure
1742 *
1743 *  This function will copy table_data to the table_info contained in
1744 *  the hw struct.
1745 **/
1746static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
1747					   struct fm10k_swapi_table_info *to)
1748{
1749	/* convert from le32 struct to CPU byte ordered values */
1750	to->used = le32_to_cpu(from->used);
1751	to->avail = le32_to_cpu(from->avail);
1752}
1753
1754const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
1755	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1756				 sizeof(struct fm10k_swapi_error)),
1757	FM10K_TLV_ATTR_LAST
1758};
1759
1760/**
1761 *  fm10k_msg_err_pf - Message handler for error reply
1762 *  @hw: Pointer to hardware structure
1763 *  @results: pointer array containing parsed data
1764 *  @mbx: Pointer to mailbox information structure
1765 *
1766 *  This handler will capture the data for any error replies to previous
1767 *  messages that the PF has sent.
1768 **/
1769s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
1770		     struct fm10k_mbx_info *mbx)
1771{
1772	struct fm10k_swapi_error err_msg;
1773	s32 err;
1774
1775	/* extract structure from message */
1776	err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
1777					   &err_msg, sizeof(err_msg));
1778	if (err)
1779		return err;
1780
1781	/* record table status */
1782	fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
1783	fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
1784	fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);
1785
1786	/* record SW API status value */
1787	hw->swapi.status = le32_to_cpu(err_msg.status);
1788
1789	return 0;
1790}
1791
1792static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
1793	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1794	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1795	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1796	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1797	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1798	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1799	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1800};
1801
1802static const struct fm10k_mac_ops mac_ops_pf = {
1803	.get_bus_info		= fm10k_get_bus_info_generic,
1804	.reset_hw		= fm10k_reset_hw_pf,
1805	.init_hw		= fm10k_init_hw_pf,
1806	.start_hw		= fm10k_start_hw_generic,
1807	.stop_hw		= fm10k_stop_hw_generic,
1808	.update_vlan		= fm10k_update_vlan_pf,
1809	.read_mac_addr		= fm10k_read_mac_addr_pf,
1810	.update_uc_addr		= fm10k_update_uc_addr_pf,
1811	.update_mc_addr		= fm10k_update_mc_addr_pf,
1812	.update_xcast_mode	= fm10k_update_xcast_mode_pf,
1813	.update_int_moderator	= fm10k_update_int_moderator_pf,
1814	.update_lport_state	= fm10k_update_lport_state_pf,
1815	.update_hw_stats	= fm10k_update_hw_stats_pf,
1816	.rebind_hw_stats	= fm10k_rebind_hw_stats_pf,
1817	.configure_dglort_map	= fm10k_configure_dglort_map_pf,
1818	.set_dma_mask		= fm10k_set_dma_mask_pf,
1819	.get_fault		= fm10k_get_fault_pf,
1820	.get_host_state		= fm10k_get_host_state_pf,
1821	.request_lport_map	= fm10k_request_lport_map_pf,
1822};
1823
1824static const struct fm10k_iov_ops iov_ops_pf = {
1825	.assign_resources		= fm10k_iov_assign_resources_pf,
1826	.configure_tc			= fm10k_iov_configure_tc_pf,
1827	.assign_int_moderator		= fm10k_iov_assign_int_moderator_pf,
1828	.assign_default_mac_vlan	= fm10k_iov_assign_default_mac_vlan_pf,
1829	.reset_resources		= fm10k_iov_reset_resources_pf,
1830	.set_lport			= fm10k_iov_set_lport_pf,
1831	.reset_lport			= fm10k_iov_reset_lport_pf,
1832	.update_stats			= fm10k_iov_update_stats_pf,
1833};
1834
1835static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
1836{
1837	fm10k_get_invariants_generic(hw);
1838
1839	return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
1840}
1841
1842const struct fm10k_info fm10k_pf_info = {
1843	.mac		= fm10k_mac_pf,
1844	.get_invariants	= fm10k_get_invariants_pf,
1845	.mac_ops	= &mac_ops_pf,
1846	.iov_ops	= &iov_ops_pf,
1847};
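/* Illustrative note (not part of this file): fm10k_pf_info is the handle
 * the PCI probe path uses to bind the PF personality to a device.
 * Roughly, and only as a sketch, the probe code copies the ops tables
 * into the hw structure and then calls get_invariants:
 *
 *	const struct fm10k_info *fi = &fm10k_pf_info;
 *
 *	hw->mac.type = fi->mac;
 *	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
 *	memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));
 *	err = fi->get_invariants(hw);
 *
 * The exact wiring lives in the PCI portion of the driver.
 */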