v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018-2023, Intel Corporation. */
   3
   4/* Intel(R) Ethernet Connection E800 Series Linux Driver */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <generated/utsrelease.h>
   9#include <linux/crash_dump.h>
  10#include "ice.h"
  11#include "ice_base.h"
  12#include "ice_lib.h"
  13#include "ice_fltr.h"
  14#include "ice_dcb_lib.h"
  15#include "ice_dcb_nl.h"
  16#include "ice_devlink.h"
  17#include "ice_hwmon.h"
  18/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
  19 * ice tracepoint functions. This must be done exactly once across the
  20 * ice driver.
  21 */
  22#define CREATE_TRACE_POINTS
  23#include "ice_trace.h"
  24#include "ice_eswitch.h"
  25#include "ice_tc_lib.h"
  26#include "ice_vsi_vlan_ops.h"
  27#include <net/xdp_sock_drv.h>
  28
  29#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
  30static const char ice_driver_string[] = DRV_SUMMARY;
  31static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
  32
  33/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
  34#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
  35#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
  36
  37MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  38MODULE_DESCRIPTION(DRV_SUMMARY);
  39MODULE_LICENSE("GPL v2");
  40MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
  41
  42static int debug = -1;
  43module_param(debug, int, 0644);
  44#ifndef CONFIG_DYNAMIC_DEBUG
  45MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
  46#else
  47MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
  48#endif /* !CONFIG_DYNAMIC_DEBUG */
  49
  50DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
  51EXPORT_SYMBOL(ice_xdp_locking_key);
  52
  53/**
  54 * ice_hw_to_dev - Get device pointer from the hardware structure
  55 * @hw: pointer to the device HW structure
  56 *
  57 * Used to access the device pointer from compilation units which can't easily
  58 * include the definition of struct ice_pf without leading to circular header
  59 * dependencies.
  60 */
  61struct device *ice_hw_to_dev(struct ice_hw *hw)
  62{
  63	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
  64
  65	return &pf->pdev->dev;
  66}
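
/* [Editor's note] A minimal sketch (not part of ice_main.c) of how a
 * compilation unit that only sees struct ice_hw can use the accessor above
 * for device logging. ice_example_log() is hypothetical; it assumes only
 * that ice_hw_to_dev() is declared in a shared header and that struct
 * ice_hw exposes api_maj_ver/api_min_ver as elsewhere in this driver.
 */
#if 0
static void ice_example_log(struct ice_hw *hw)
{
	/* no need to pull in the full definition of struct ice_pf here */
	dev_info(ice_hw_to_dev(hw), "FW API version %u.%u\n",
		 hw->api_maj_ver, hw->api_min_ver);
}
#endif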
  67
  68static struct workqueue_struct *ice_wq;
  69struct workqueue_struct *ice_lag_wq;
  70static const struct net_device_ops ice_netdev_safe_mode_ops;
  71static const struct net_device_ops ice_netdev_ops;
  72
  73static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
  74
  75static void ice_vsi_release_all(struct ice_pf *pf);
  76
  77static int ice_rebuild_channels(struct ice_pf *pf);
  78static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
  79
  80static int
  81ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
  82		     void *cb_priv, enum tc_setup_type type, void *type_data,
  83		     void *data,
  84		     void (*cleanup)(struct flow_block_cb *block_cb));
  85
  86bool netif_is_ice(const struct net_device *dev)
  87{
  88	return dev && (dev->netdev_ops == &ice_netdev_ops);
  89}
  90
  91/**
  92 * ice_get_tx_pending - returns number of Tx descriptors not processed
  93 * @ring: the ring of descriptors
  94 */
  95static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
  96{
  97	u16 head, tail;
  98
  99	head = ring->next_to_clean;
 100	tail = ring->next_to_use;
 101
 102	if (head != tail)
 103		return (head < tail) ?
 104			tail - head : (tail + ring->count - head);
 105	return 0;
 106}
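
/* [Editor's note] A worked example of the wraparound arithmetic above,
 * using made-up values: with ring->count = 256, head (next_to_clean) = 250
 * and tail (next_to_use) = 10, the ring has wrapped, so the pending count
 * is tail + count - head = 10 + 256 - 250 = 16 descriptors. Without the
 * wrap (head = 4, tail = 10) it is simply tail - head = 6.
 */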
 107
 108/**
 109 * ice_check_for_hang_subtask - check for and recover hung queues
 110 * @pf: pointer to PF struct
 111 */
 112static void ice_check_for_hang_subtask(struct ice_pf *pf)
 113{
 114	struct ice_vsi *vsi = NULL;
 115	struct ice_hw *hw;
 116	unsigned int i;
 117	int packets;
 118	u32 v;
 119
 120	ice_for_each_vsi(pf, v)
 121		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
 122			vsi = pf->vsi[v];
 123			break;
 124		}
 125
 126	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
 127		return;
 128
 129	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
 130		return;
 131
 132	hw = &vsi->back->hw;
 133
 134	ice_for_each_txq(vsi, i) {
 135		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
 136		struct ice_ring_stats *ring_stats;
 137
 138		if (!tx_ring)
 139			continue;
 140		if (ice_ring_ch_enabled(tx_ring))
 141			continue;
 142
 143		ring_stats = tx_ring->ring_stats;
 144		if (!ring_stats)
 145			continue;
 146
 147		if (tx_ring->desc) {
 148			/* If packet counter has not changed the queue is
 149			 * likely stalled, so force an interrupt for this
 150			 * queue.
 151			 *
 152			 * prev_pkt would be negative if there was no
 153			 * pending work.
 154			 */
 155			packets = ring_stats->stats.pkts & INT_MAX;
 156			if (ring_stats->tx_stats.prev_pkt == packets) {
 157				/* Trigger sw interrupt to revive the queue */
 158				ice_trigger_sw_intr(hw, tx_ring->q_vector);
 159				continue;
 160			}
 161
 162			/* Memory barrier between read of packet count and call
 163			 * to ice_get_tx_pending()
 164			 */
 165			smp_rmb();
 166			ring_stats->tx_stats.prev_pkt =
 167			    ice_get_tx_pending(tx_ring) ? packets : -1;
 168		}
 169	}
 170}
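
/* [Editor's note] The stall check above works by comparing a packet-count
 * snapshot taken on the previous service-task pass; -1 marks an idle queue
 * so it is never flagged. A hedged sketch of the same heuristic, with
 * hypothetical names:
 */
#if 0
static bool example_queue_stalled(int cur_pkts, int *prev_pkts, bool work_pending)
{
	if (*prev_pkts == cur_pkts)
		return true;	/* counter did not move while work was pending */

	/* snapshot the counter, or -1 when there is no pending work */
	*prev_pkts = work_pending ? cur_pkts : -1;
	return false;
}
#endif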
 171
 172/**
 173 * ice_init_mac_fltr - Set initial MAC filters
 174 * @pf: board private structure
 175 *
 176 * Set up the initial MAC filters for the PF VSI; configure filters for the
 177 * permanent address and the broadcast address. If an error is encountered,
 178 * the netdevice will be unregistered.
 179 */
 180static int ice_init_mac_fltr(struct ice_pf *pf)
 181{
 182	struct ice_vsi *vsi;
 183	u8 *perm_addr;
 184
 185	vsi = ice_get_main_vsi(pf);
 186	if (!vsi)
 187		return -EINVAL;
 188
 189	perm_addr = vsi->port_info->mac.perm_addr;
 190	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
 191}
 192
 193/**
 194 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 195 * @netdev: the net device on which the sync is happening
 196 * @addr: MAC address to sync
 197 *
 198 * This is a callback function which is called by the in kernel device sync
 199 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 200 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 201 * MAC filters to the hardware.
 202 */
 203static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
 204{
 205	struct ice_netdev_priv *np = netdev_priv(netdev);
 206	struct ice_vsi *vsi = np->vsi;
 207
 208	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
 209				     ICE_FWD_TO_VSI))
 210		return -EINVAL;
 211
 212	return 0;
 213}
 214
 215/**
 216 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 217 * @netdev: the net device on which the unsync is happening
 218 * @addr: MAC address to unsync
 219 *
 220 * This is a callback function which is called by the in kernel device unsync
 221 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 222 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 223 * delete the MAC filters from the hardware.
 224 */
 225static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 226{
 227	struct ice_netdev_priv *np = netdev_priv(netdev);
 228	struct ice_vsi *vsi = np->vsi;
 229
 230	/* Under some circumstances, we might receive a request to delete our
 231	 * own device address from our uc list. Because we store the device
 232	 * address in the VSI's MAC filter list, we need to ignore such
 233	 * requests and not delete our device address from this list.
 234	 */
 235	if (ether_addr_equal(addr, netdev->dev_addr))
 236		return 0;
 237
 238	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
 239				     ICE_FWD_TO_VSI))
 240		return -EINVAL;
 241
 242	return 0;
 243}
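
/* [Editor's note] For context: the two callbacks above are handed to the
 * kernel's address-list helpers, which walk the netdev's unicast/multicast
 * lists under addr_list_lock and invoke the sync callback for added
 * addresses and the unsync callback for removed ones. The driver's actual
 * call site is in ice_vsi_sync_fltr() below; in outline:
 */
#if 0
	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, ice_add_mac_to_sync_list, ice_add_mac_to_unsync_list);
	__dev_mc_sync(netdev, ice_add_mac_to_sync_list, ice_add_mac_to_unsync_list);
	netif_addr_unlock_bh(netdev);
#endif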
 244
 245/**
 246 * ice_vsi_fltr_changed - check if filter state changed
 247 * @vsi: VSI to be checked
 248 *
 249 * returns true if filter state has changed, false otherwise.
 250 */
 251static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 252{
 253	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
 254	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 255}
 256
 257/**
 258 * ice_set_promisc - Enable promiscuous mode for a given PF
 259 * @vsi: the VSI being configured
 260 * @promisc_m: mask of promiscuous config bits
 261 *
 262 */
 263static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
 264{
 265	int status;
 266
 267	if (vsi->type != ICE_VSI_PF)
 268		return 0;
 269
 270	if (ice_vsi_has_non_zero_vlans(vsi)) {
 271		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
 272		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
 273						       promisc_m);
 274	} else {
 275		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
 276						  promisc_m, 0);
 277	}
 278	if (status && status != -EEXIST)
 279		return status;
 280
 281	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
 282		   vsi->vsi_num, promisc_m);
 283	return 0;
 284}
 285
 286/**
 287 * ice_clear_promisc - Disable promiscuous mode for a given PF
 288 * @vsi: the VSI being configured
 289 * @promisc_m: mask of promiscuous config bits
 290 *
 291 */
 292static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
 293{
 294	int status;
 295
 296	if (vsi->type != ICE_VSI_PF)
 297		return 0;
 298
 299	if (ice_vsi_has_non_zero_vlans(vsi)) {
 300		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
 301		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
 302							 promisc_m);
 303	} else {
 304		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
 305						    promisc_m, 0);
 306	}
 307
 308	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
 309		   vsi->vsi_num, promisc_m);
 310	return status;
 311}
 312
 313/**
 314 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 315 * @vsi: ptr to the VSI
 316 *
 317 * Push any outstanding VSI filter changes through the AdminQ.
 318 */
 319static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 320{
 321	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 322	struct device *dev = ice_pf_to_dev(vsi->back);
 323	struct net_device *netdev = vsi->netdev;
 324	bool promisc_forced_on = false;
 325	struct ice_pf *pf = vsi->back;
 326	struct ice_hw *hw = &pf->hw;
 327	u32 changed_flags = 0;
 328	int err;
 329
 330	if (!vsi->netdev)
 331		return -EINVAL;
 332
 333	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
 334		usleep_range(1000, 2000);
 335
 336	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
 337	vsi->current_netdev_flags = vsi->netdev->flags;
 338
 339	INIT_LIST_HEAD(&vsi->tmp_sync_list);
 340	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
 341
 342	if (ice_vsi_fltr_changed(vsi)) {
 343		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 344		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 345
 346		/* grab the netdev's addr_list_lock */
 347		netif_addr_lock_bh(netdev);
 348		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 349			      ice_add_mac_to_unsync_list);
 350		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 351			      ice_add_mac_to_unsync_list);
 352		/* our temp lists are populated. release lock */
 353		netif_addr_unlock_bh(netdev);
 354	}
 355
 356	/* Remove MAC addresses in the unsync list */
 357	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
 358	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
 359	if (err) {
 360		netdev_err(netdev, "Failed to delete MAC filters\n");
 361		/* if we failed because of alloc failures, just bail */
 362		if (err == -ENOMEM)
 363			goto out;
 364	}
 365
 366	/* Add MAC addresses in the sync list */
 367	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
 368	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
 369	/* If the filter was added successfully or already exists, do not treat
 370	 * that as an error and do not enter the 'if' block below; instead,
 371	 * continue processing the rest of the function.
 372	 */
 373	if (err && err != -EEXIST) {
 374		netdev_err(netdev, "Failed to add MAC filters\n");
 375		/* If there is no more space for new umac filters, VSI
 376		 * should go into promiscuous mode. There should be some
 377		 * space reserved for promiscuous filters.
 378		 */
 379		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
 380		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
 381				      vsi->state)) {
 382			promisc_forced_on = true;
 383			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
 384				    vsi->vsi_num);
 385		} else {
 386			goto out;
 387		}
 388	}
 389	err = 0;
 390	/* check for changes in promiscuous modes */
 391	if (changed_flags & IFF_ALLMULTI) {
 392		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
 393			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 394			if (err) {
 395				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
 396				goto out_promisc;
 397			}
 398		} else {
 399			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
 400			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 401			if (err) {
 402				vsi->current_netdev_flags |= IFF_ALLMULTI;
 403				goto out_promisc;
 404			}
 405		}
 406	}
 407
 408	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
 409	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
 410		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 411		if (vsi->current_netdev_flags & IFF_PROMISC) {
 412			/* Apply Rx filter rule to get traffic from wire */
 413			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
 414				err = ice_set_dflt_vsi(vsi);
 415				if (err && err != -EEXIST) {
 416					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
 417						   err, vsi->vsi_num);
 418					vsi->current_netdev_flags &=
 419						~IFF_PROMISC;
 420					goto out_promisc;
 421				}
 422				err = 0;
 423				vlan_ops->dis_rx_filtering(vsi);
 424
 425				/* promiscuous mode implies allmulticast so
 426				 * that VSIs that are in promiscuous mode are
 427				 * subscribed to multicast packets coming to
 428				 * the port
 429				 */
 430				err = ice_set_promisc(vsi,
 431						      ICE_MCAST_PROMISC_BITS);
 432				if (err)
 433					goto out_promisc;
 434			}
 435		} else {
 436			/* Clear Rx filter to remove traffic from wire */
 437			if (ice_is_vsi_dflt_vsi(vsi)) {
 438				err = ice_clear_dflt_vsi(vsi);
 439				if (err) {
 440					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
 441						   err, vsi->vsi_num);
 442					vsi->current_netdev_flags |=
 443						IFF_PROMISC;
 444					goto out_promisc;
 445				}
 446				if (vsi->netdev->features &
 447				    NETIF_F_HW_VLAN_CTAG_FILTER)
 448					vlan_ops->ena_rx_filtering(vsi);
 449			}
 450
 451			/* disable allmulti here, but only if allmulti is not
 452			 * still enabled for the netdev
 453			 */
 454			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
 455				err = ice_clear_promisc(vsi,
 456							ICE_MCAST_PROMISC_BITS);
 457				if (err) {
 458					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
 459						   err, vsi->vsi_num);
 460				}
 461			}
 462		}
 463	}
 464	goto exit;
 465
 466out_promisc:
 467	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 468	goto exit;
 469out:
 470	/* if something went wrong then set the changed flag so we try again */
 471	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 472	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 473exit:
 474	clear_bit(ICE_CFG_BUSY, vsi->state);
 475	return err;
 476}
 477
 478/**
 479 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 480 * @pf: board private structure
 481 */
 482static void ice_sync_fltr_subtask(struct ice_pf *pf)
 483{
 484	int v;
 485
 486	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
 487		return;
 488
 489	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 490
 491	ice_for_each_vsi(pf, v)
 492		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
 493		    ice_vsi_sync_fltr(pf->vsi[v])) {
 494			/* come back and try again later */
 495			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 496			break;
 497		}
 498}
 499
 500/**
 501 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 502 * @pf: the PF
 503 * @locked: is the rtnl_lock already held
 504 */
 505static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 506{
 507	int node;
 508	int v;
 509
 510	ice_for_each_vsi(pf, v)
 511		if (pf->vsi[v])
 512			ice_dis_vsi(pf->vsi[v], locked);
 513
 514	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
 515		pf->pf_agg_node[node].num_vsis = 0;
 516
 517	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
 518		pf->vf_agg_node[node].num_vsis = 0;
 519}
 520
 521/**
 522 * ice_clear_sw_switch_recipes - clear switch recipes
 523 * @pf: board private structure
 524 *
 525 * Mark switch recipes as not created in SW structures. There are cases where
 526 * rules (especially advanced rules) need to be restored, either re-read from
 527 * hardware or added again, for example after a reset. The 'recp_created' flag
 528 * prevents that from happening and needs to be cleared upfront.
 529 */
 530static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
 531{
 532	struct ice_sw_recipe *recp;
 533	u8 i;
 534
 535	recp = pf->hw.switch_info->recp_list;
 536	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
 537		recp[i].recp_created = false;
 538}
 539
 540/**
 541 * ice_prepare_for_reset - prep for reset
 542 * @pf: board private structure
 543 * @reset_type: reset type requested
 544 *
 545 * Inform or close all dependent features in prep for reset.
 546 */
 547static void
 548ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 549{
 550	struct ice_hw *hw = &pf->hw;
 551	struct ice_vsi *vsi;
 552	struct ice_vf *vf;
 553	unsigned int bkt;
 554
 555	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
 556
 557	/* already prepared for reset */
 558	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
 559		return;
 560
 561	ice_unplug_aux_dev(pf);
 562
 563	/* Notify VFs of impending reset */
 564	if (ice_check_sq_alive(hw, &hw->mailboxq))
 565		ice_vc_notify_reset(pf);
 566
 567	/* Disable VFs until reset is completed */
 568	mutex_lock(&pf->vfs.table_lock);
 569	ice_for_each_vf(pf, bkt, vf)
 570		ice_set_vf_state_dis(vf);
 571	mutex_unlock(&pf->vfs.table_lock);
 572
 573	if (ice_is_eswitch_mode_switchdev(pf)) {
 574		if (reset_type != ICE_RESET_PFR)
 575			ice_clear_sw_switch_recipes(pf);
 576	}
 577
 578	/* release ADQ specific HW and SW resources */
 579	vsi = ice_get_main_vsi(pf);
 580	if (!vsi)
 581		goto skip;
 582
 583	/* to be on the safe side, reset orig_rss_size so that the normal flow
 584	 * of deciding rss_size can take precedence
 585	 */
 586	vsi->orig_rss_size = 0;
 587
 588	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
 589		if (reset_type == ICE_RESET_PFR) {
 590			vsi->old_ena_tc = vsi->all_enatc;
 591			vsi->old_numtc = vsi->all_numtc;
 592		} else {
 593			ice_remove_q_channels(vsi, true);
 594
 595			/* other reset types do not support channel rebuild,
 596			 * hence reset the related info
 597			 */
 598			vsi->old_ena_tc = 0;
 599			vsi->all_enatc = 0;
 600			vsi->old_numtc = 0;
 601			vsi->all_numtc = 0;
 602			vsi->req_txq = 0;
 603			vsi->req_rxq = 0;
 604			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
 605			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
 606		}
 607	}
 608skip:
 609
 610	/* clear SW filtering DB */
 611	ice_clear_hw_tbls(hw);
 612	/* disable the VSIs and their queues that are not already DOWN */
 613	ice_pf_dis_all_vsi(pf, false);
 614
 615	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
 616		ice_ptp_prepare_for_reset(pf, reset_type);
 617
 618	if (ice_is_feature_supported(pf, ICE_F_GNSS))
 619		ice_gnss_exit(pf);
 620
 621	if (hw->port_info)
 622		ice_sched_clear_port(hw->port_info);
 623
 624	ice_shutdown_all_ctrlq(hw);
 625
 626	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
 627}
 628
 629/**
 630 * ice_do_reset - Initiate one of many types of resets
 631 * @pf: board private structure
 632 * @reset_type: reset type requested before this function was called.
 633 */
 634static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 635{
 636	struct device *dev = ice_pf_to_dev(pf);
 637	struct ice_hw *hw = &pf->hw;
 638
 639	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
 640
 641	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
 642		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
 643		reset_type = ICE_RESET_CORER;
 644	}
 645
 646	ice_prepare_for_reset(pf, reset_type);
 647
 648	/* trigger the reset */
 649	if (ice_reset(hw, reset_type)) {
 650		dev_err(dev, "reset %d failed\n", reset_type);
 651		set_bit(ICE_RESET_FAILED, pf->state);
 652		clear_bit(ICE_RESET_OICR_RECV, pf->state);
 653		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 654		clear_bit(ICE_PFR_REQ, pf->state);
 655		clear_bit(ICE_CORER_REQ, pf->state);
 656		clear_bit(ICE_GLOBR_REQ, pf->state);
 657		wake_up(&pf->reset_wait_queue);
 658		return;
 659	}
 660
 661	/* PFR is a bit of a special case because it doesn't result in an OICR
 662	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
 663	 * associated state bits.
 664	 */
 665	if (reset_type == ICE_RESET_PFR) {
 666		pf->pfr_count++;
 667		ice_rebuild(pf, reset_type);
 668		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 669		clear_bit(ICE_PFR_REQ, pf->state);
 670		wake_up(&pf->reset_wait_queue);
 671		ice_reset_all_vfs(pf);
 672	}
 673}
 674
 675/**
 676 * ice_reset_subtask - Set up for resetting the device and driver
 677 * @pf: board private structure
 678 */
 679static void ice_reset_subtask(struct ice_pf *pf)
 680{
 681	enum ice_reset_req reset_type = ICE_RESET_INVAL;
 682
 683	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
 684	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
 685	 * of reset is pending and sets bits in pf->state indicating the reset
 686	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set,
 687	 * prepare for pending reset if not already (for PF software-initiated
 688	 * global resets the software should already be prepared for it as
 689	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
 690	 * by firmware or software on other PFs, that bit is not set so prepare
 691	 * for the reset now), poll for reset done, rebuild and return.
 692	 */
 693	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
 694		/* Perform the largest reset requested */
 695		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
 696			reset_type = ICE_RESET_CORER;
 697		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
 698			reset_type = ICE_RESET_GLOBR;
 699		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
 700			reset_type = ICE_RESET_EMPR;
 701		/* return if no valid reset type requested */
 702		if (reset_type == ICE_RESET_INVAL)
 703			return;
 704		ice_prepare_for_reset(pf, reset_type);
 705
 706		/* make sure we are ready to rebuild */
 707		if (ice_check_reset(&pf->hw)) {
 708			set_bit(ICE_RESET_FAILED, pf->state);
 709		} else {
 710			/* done with reset. start rebuild */
 711			pf->hw.reset_ongoing = false;
 712			ice_rebuild(pf, reset_type);
 713			/* clear bit to resume normal operations, but
 714			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
 715			 */
 716			clear_bit(ICE_RESET_OICR_RECV, pf->state);
 717			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 718			clear_bit(ICE_PFR_REQ, pf->state);
 719			clear_bit(ICE_CORER_REQ, pf->state);
 720			clear_bit(ICE_GLOBR_REQ, pf->state);
 721			wake_up(&pf->reset_wait_queue);
 722			ice_reset_all_vfs(pf);
 723		}
 724
 725		return;
 726	}
 727
 728	/* No pending resets to finish processing. Check for new resets */
 729	if (test_bit(ICE_PFR_REQ, pf->state)) {
 730		reset_type = ICE_RESET_PFR;
 731		if (pf->lag && pf->lag->bonded) {
 732			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
 733			reset_type = ICE_RESET_CORER;
 734		}
 735	}
 736	if (test_bit(ICE_CORER_REQ, pf->state))
 737		reset_type = ICE_RESET_CORER;
 738	if (test_bit(ICE_GLOBR_REQ, pf->state))
 739		reset_type = ICE_RESET_GLOBR;
 740	/* If no valid reset type requested just return */
 741	if (reset_type == ICE_RESET_INVAL)
 742		return;
 743
 744	/* reset if not already down or busy */
 745	if (!test_bit(ICE_DOWN, pf->state) &&
 746	    !test_bit(ICE_CFG_BUSY, pf->state)) {
 747		ice_do_reset(pf, reset_type);
 748	}
 749}
 750
 751/**
 752 * ice_print_topo_conflict - print topology conflict message
 753 * @vsi: the VSI whose topology status is being checked
 754 */
 755static void ice_print_topo_conflict(struct ice_vsi *vsi)
 756{
 757	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
 758	case ICE_AQ_LINK_TOPO_CONFLICT:
 759	case ICE_AQ_LINK_MEDIA_CONFLICT:
 760	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
 761	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
 762	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
 763		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
 764		break;
 765	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
 766		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
 767			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
 768		else
 769			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
 770		break;
 771	default:
 772		break;
 773	}
 774}
 775
 776/**
 777 * ice_print_link_msg - print link up or down message
 778 * @vsi: the VSI whose link status is being queried
 779 * @isup: boolean for if the link is now up or down
 780 */
 781void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 782{
 783	struct ice_aqc_get_phy_caps_data *caps;
 784	const char *an_advertised;
 785	const char *fec_req;
 786	const char *speed;
 787	const char *fec;
 788	const char *fc;
 789	const char *an;
 790	int status;
 791
 792	if (!vsi)
 793		return;
 794
 795	if (vsi->current_isup == isup)
 796		return;
 797
 798	vsi->current_isup = isup;
 799
 800	if (!isup) {
 801		netdev_info(vsi->netdev, "NIC Link is Down\n");
 802		return;
 803	}
 804
 805	switch (vsi->port_info->phy.link_info.link_speed) {
 806	case ICE_AQ_LINK_SPEED_100GB:
 807		speed = "100 G";
 808		break;
 809	case ICE_AQ_LINK_SPEED_50GB:
 810		speed = "50 G";
 811		break;
 812	case ICE_AQ_LINK_SPEED_40GB:
 813		speed = "40 G";
 814		break;
 815	case ICE_AQ_LINK_SPEED_25GB:
 816		speed = "25 G";
 817		break;
 818	case ICE_AQ_LINK_SPEED_20GB:
 819		speed = "20 G";
 820		break;
 821	case ICE_AQ_LINK_SPEED_10GB:
 822		speed = "10 G";
 823		break;
 824	case ICE_AQ_LINK_SPEED_5GB:
 825		speed = "5 G";
 826		break;
 827	case ICE_AQ_LINK_SPEED_2500MB:
 828		speed = "2.5 G";
 829		break;
 830	case ICE_AQ_LINK_SPEED_1000MB:
 831		speed = "1 G";
 832		break;
 833	case ICE_AQ_LINK_SPEED_100MB:
 834		speed = "100 M";
 835		break;
 836	default:
 837		speed = "Unknown ";
 838		break;
 839	}
 840
 841	switch (vsi->port_info->fc.current_mode) {
 842	case ICE_FC_FULL:
 843		fc = "Rx/Tx";
 844		break;
 845	case ICE_FC_TX_PAUSE:
 846		fc = "Tx";
 847		break;
 848	case ICE_FC_RX_PAUSE:
 849		fc = "Rx";
 850		break;
 851	case ICE_FC_NONE:
 852		fc = "None";
 853		break;
 854	default:
 855		fc = "Unknown";
 856		break;
 857	}
 858
 859	/* Get FEC mode based on negotiated link info */
 860	switch (vsi->port_info->phy.link_info.fec_info) {
 861	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
 862	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
 863		fec = "RS-FEC";
 864		break;
 865	case ICE_AQ_LINK_25G_KR_FEC_EN:
 866		fec = "FC-FEC/BASE-R";
 867		break;
 868	default:
 869		fec = "NONE";
 870		break;
 871	}
 872
 873	/* check if autoneg completed; may be false if autoneg is not supported */
 874	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
 875		an = "True";
 876	else
 877		an = "False";
 878
 879	/* Get FEC mode requested based on PHY caps last SW configuration */
 880	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 881	if (!caps) {
 882		fec_req = "Unknown";
 883		an_advertised = "Unknown";
 884		goto done;
 885	}
 886
 887	status = ice_aq_get_phy_caps(vsi->port_info, false,
 888				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
 889	if (status)
 890		netdev_info(vsi->netdev, "Get phy capability failed.\n");
 891
 892	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
 893
 894	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
 895	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
 896		fec_req = "RS-FEC";
 897	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
 898		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
 899		fec_req = "FC-FEC/BASE-R";
 900	else
 901		fec_req = "NONE";
 902
 903	kfree(caps);
 904
 905done:
 906	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
 907		    speed, fec_req, fec, an_advertised, an, fc);
 908	ice_print_topo_conflict(vsi);
 909}
 910
 911/**
 912 * ice_vsi_link_event - update the VSI's netdev
 913 * @vsi: the VSI on which the link event occurred
 914 * @link_up: whether the VSI's netdev carrier should be set up or down
 915 */
 916static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
 917{
 918	if (!vsi)
 919		return;
 920
 921	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
 922		return;
 923
 924	if (vsi->type == ICE_VSI_PF) {
 925		if (link_up == netif_carrier_ok(vsi->netdev))
 926			return;
 927
 928		if (link_up) {
 929			netif_carrier_on(vsi->netdev);
 930			netif_tx_wake_all_queues(vsi->netdev);
 931		} else {
 932			netif_carrier_off(vsi->netdev);
 933			netif_tx_stop_all_queues(vsi->netdev);
 934		}
 935	}
 936}
 937
 938/**
 939 * ice_set_dflt_mib - send a default config MIB to the FW
 940 * @pf: private PF struct
 941 *
 942 * This function sends a default configuration MIB to the FW.
 943 *
 944 * If this function errors out at any point, the driver is still able to
 945 * function.  The main impact is that LFC may not operate as expected.
 946 * Therefore an error in this function should be reported with a DBG
 947 * message, and the driver should continue on with the rebuild/reenable.
 948 */
 949static void ice_set_dflt_mib(struct ice_pf *pf)
 950{
 951	struct device *dev = ice_pf_to_dev(pf);
 952	u8 mib_type, *buf, *lldpmib = NULL;
 953	u16 len, typelen, offset = 0;
 954	struct ice_lldp_org_tlv *tlv;
 955	struct ice_hw *hw = &pf->hw;
 956	u32 ouisubtype;
 957
 958	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
 959	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
 960	if (!lldpmib) {
 961		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
 962			__func__);
 963		return;
 964	}
 965
 966	/* Add ETS CFG TLV */
 967	tlv = (struct ice_lldp_org_tlv *)lldpmib;
 968	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
 969		   ICE_IEEE_ETS_TLV_LEN);
 970	tlv->typelen = htons(typelen);
 971	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 972		      ICE_IEEE_SUBTYPE_ETS_CFG);
 973	tlv->ouisubtype = htonl(ouisubtype);
 974
 975	buf = tlv->tlvinfo;
 976	buf[0] = 0;
 977
 978	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
 979	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
 980	 * Octets 13 - 20 are TSA values - leave as zeros
 981	 */
 982	buf[5] = 0x64;
 983	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
 984	offset += len + 2;
 985	tlv = (struct ice_lldp_org_tlv *)
 986		((char *)tlv + sizeof(tlv->typelen) + len);
 987
 988	/* Add ETS REC TLV */
 989	buf = tlv->tlvinfo;
 990	tlv->typelen = htons(typelen);
 991
 992	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 993		      ICE_IEEE_SUBTYPE_ETS_REC);
 994	tlv->ouisubtype = htonl(ouisubtype);
 995
 996	/* First octet of buf is reserved
 997	 * Octets 1 - 4 map UP to TC - all UPs map to zero
 998	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
 999	 * Octets 13 - 20 are TSA value - leave as zeros
1000	 */
1001	buf[5] = 0x64;
1002	offset += len + 2;
1003	tlv = (struct ice_lldp_org_tlv *)
1004		((char *)tlv + sizeof(tlv->typelen) + len);
1005
1006	/* Add PFC CFG TLV */
1007	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1008		   ICE_IEEE_PFC_TLV_LEN);
1009	tlv->typelen = htons(typelen);
1010
1011	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1012		      ICE_IEEE_SUBTYPE_PFC_CFG);
1013	tlv->ouisubtype = htonl(ouisubtype);
1014
1015	/* Octet 0 carries the PFC capability; octet 1 is left as all zeros - PFC disabled */
1016	buf[0] = 0x08;
1017	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
1018	offset += len + 2;
1019
1020	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1021		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1022
1023	kfree(lldpmib);
1024}
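
/* [Editor's note] An illustrative expansion of the TLV header packing used
 * above, in symbolic form only. Each organizationally specific LLDP TLV
 * begins with a 16-bit type/length word and a 32-bit OUI/subtype word:
 *
 *	typelen    = (ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | payload_len;
 *	ouisubtype = (ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | subtype;
 *
 * Both are converted with htons()/htonl() because LLDPDUs are transmitted
 * in network byte order.
 */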
1025
1026/**
1027 * ice_check_phy_fw_load - check if PHY FW load failed
1028 * @pf: pointer to PF struct
1029 * @link_cfg_err: bitmap from the link info structure
1030 *
1031 * check if external PHY FW load failed and print an error message if it did
1032 */
1033static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1034{
1035	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1036		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1037		return;
1038	}
1039
1040	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1041		return;
1042
1043	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1044		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1045		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1046	}
1047}
1048
1049/**
1050 * ice_check_module_power
1051 * @pf: pointer to PF struct
1052 * @link_cfg_err: bitmap from the link info structure
1053 *
1054 * check module power level returned by a previous call to aq_get_link_info
1055 * and print error messages if module power level is not supported
1056 */
1057static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1058{
1059	/* if module power level is supported, clear the flag */
1060	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1061			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1062		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1063		return;
1064	}
1065
1066	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1067	 * above block didn't clear this bit, there's nothing to do
1068	 */
1069	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1070		return;
1071
1072	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1073		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1074		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1075	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1076		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1077		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1078	}
1079}
1080
1081/**
1082 * ice_check_link_cfg_err - check if link configuration failed
1083 * @pf: pointer to the PF struct
1084 * @link_cfg_err: bitmap from the link info structure
1085 *
1086 * print if any link configuration failure happens due to the value in the
1087 * link_cfg_err parameter in the link info structure
1088 */
1089static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1090{
1091	ice_check_module_power(pf, link_cfg_err);
1092	ice_check_phy_fw_load(pf, link_cfg_err);
1093}
1094
1095/**
1096 * ice_link_event - process the link event
1097 * @pf: PF that the link event is associated with
1098 * @pi: port_info for the port that the link event is associated with
1099 * @link_up: true if the physical link is up and false if it is down
1100 * @link_speed: current link speed received from the link event
1101 *
1102 * Returns 0 on success and negative on failure
1103 */
1104static int
1105ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1106	       u16 link_speed)
1107{
1108	struct device *dev = ice_pf_to_dev(pf);
1109	struct ice_phy_info *phy_info;
1110	struct ice_vsi *vsi;
1111	u16 old_link_speed;
1112	bool old_link;
1113	int status;
1114
1115	phy_info = &pi->phy;
1116	phy_info->link_info_old = phy_info->link_info;
1117
1118	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1119	old_link_speed = phy_info->link_info_old.link_speed;
1120
1121	/* update the link info structures and re-enable link events;
1122	 * don't bail on failure, as other bookkeeping is still needed
1123	 */
1124	status = ice_update_link_info(pi);
1125	if (status)
1126		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1127			pi->lport, status,
1128			ice_aq_str(pi->hw->adminq.sq_last_status));
1129
1130	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1131
1132	/* Check if the link state is up after updating link info, and treat
1133	 * this event as an UP event since the link is actually UP now.
1134	 */
1135	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1136		link_up = true;
1137
1138	vsi = ice_get_main_vsi(pf);
1139	if (!vsi || !vsi->port_info)
1140		return -EINVAL;
1141
1142	/* turn off PHY if media was removed */
1143	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1144	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1145		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1146		ice_set_link(vsi, false);
1147	}
1148
1149	/* if the old link up/down and speed is the same as the new */
1150	if (link_up == old_link && link_speed == old_link_speed)
1151		return 0;
1152
1153	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1154
1155	if (ice_is_dcb_active(pf)) {
1156		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1157			ice_dcb_rebuild(pf);
1158	} else {
1159		if (link_up)
1160			ice_set_dflt_mib(pf);
1161	}
1162	ice_vsi_link_event(vsi, link_up);
1163	ice_print_link_msg(vsi, link_up);
1164
1165	ice_vc_notify_link_state(pf);
1166
1167	return 0;
1168}
1169
1170/**
1171 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1172 * @pf: board private structure
1173 */
1174static void ice_watchdog_subtask(struct ice_pf *pf)
1175{
1176	int i;
1177
1178	/* if interface is down do nothing */
1179	if (test_bit(ICE_DOWN, pf->state) ||
1180	    test_bit(ICE_CFG_BUSY, pf->state))
1181		return;
1182
1183	/* make sure we don't do these things too often */
1184	if (time_before(jiffies,
1185			pf->serv_tmr_prev + pf->serv_tmr_period))
1186		return;
1187
1188	pf->serv_tmr_prev = jiffies;
1189
1190	/* Update the stats for active netdevs so the network stack
1191	 * can look at updated numbers whenever it cares to
1192	 */
1193	ice_update_pf_stats(pf);
1194	ice_for_each_vsi(pf, i)
1195		if (pf->vsi[i] && pf->vsi[i]->netdev)
1196			ice_update_vsi_stats(pf->vsi[i]);
1197}
1198
1199/**
1200 * ice_init_link_events - enable/initialize link events
1201 * @pi: pointer to the port_info instance
1202 *
1203 * Returns -EIO on failure, 0 on success
1204 */
1205static int ice_init_link_events(struct ice_port_info *pi)
1206{
1207	u16 mask;
1208
1209	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1210		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1211		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1212
1213	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1214		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1215			pi->lport);
1216		return -EIO;
1217	}
1218
1219	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1220		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1221			pi->lport);
1222		return -EIO;
1223	}
1224
1225	return 0;
1226}
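
/* [Editor's note] Editorial reading of the mask built above: in this event
 * mask a set bit appears to suppress the corresponding link event, so the
 * bitwise NOT leaves exactly UPDOWN, MEDIA_NA, MODULE_QUAL_FAIL and
 * PHY_FW_LOAD_FAIL enabled while masking everything else.
 */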
1227
1228/**
1229 * ice_handle_link_event - handle link event via ARQ
1230 * @pf: PF that the link event is associated with
1231 * @event: event structure containing link status info
1232 */
1233static int
1234ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1235{
1236	struct ice_aqc_get_link_status_data *link_data;
1237	struct ice_port_info *port_info;
1238	int status;
1239
1240	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1241	port_info = pf->hw.port_info;
1242	if (!port_info)
1243		return -EINVAL;
1244
1245	status = ice_link_event(pf, port_info,
1246				!!(link_data->link_info & ICE_AQ_LINK_UP),
1247				le16_to_cpu(link_data->link_speed));
1248	if (status)
1249		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1250			status);
1251
1252	return status;
1253}
1254
1255/**
1256 * ice_get_fwlog_data - copy the FW log data from ARQ event
1257 * @pf: PF that the FW log event is associated with
1258 * @event: event structure containing FW log data
1259 */
1260static void
1261ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
1262{
1263	struct ice_fwlog_data *fwlog;
1264	struct ice_hw *hw = &pf->hw;
1265
1266	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
1267
1268	memset(fwlog->data, 0, PAGE_SIZE);
1269	fwlog->data_size = le16_to_cpu(event->desc.datalen);
1270
1271	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
1272	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
1273
1274	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
1275		/* the rings are full so bump the head to create room */
1276		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
1277					 hw->fwlog_ring.size);
1278	}
1279}
1280
1281/**
1282 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
1283 * @pf: pointer to the PF private structure
1284 * @task: intermediate helper storage and identifier for waiting
1285 * @opcode: the opcode to wait for
1286 *
1287 * Prepares to wait for a specific AdminQ completion event on the ARQ for
1288 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
1289 *
1290 * Calls are separated to allow caller registering for event before sending
1291 * the command, which mitigates a race between registering and FW responding.
1292 *
1293 * To obtain only the descriptor contents, pass a task->event with a null
1294 * msg_buf. If the complete data buffer is desired, allocate the
1295 * task->event.msg_buf with enough space ahead of time.
1296 */
1297void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1298			   u16 opcode)
1299{
1300	INIT_HLIST_NODE(&task->entry);
1301	task->opcode = opcode;
1302	task->state = ICE_AQ_TASK_WAITING;
1303
1304	spin_lock_bh(&pf->aq_wait_lock);
1305	hlist_add_head(&task->entry, &pf->aq_wait_list);
1306	spin_unlock_bh(&pf->aq_wait_lock);
1307}
1308
1309/**
1310 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1311 * @pf: pointer to the PF private structure
1312 * @task: ptr prepared by ice_aq_prep_for_event()
1313 * @timeout: how long to wait, in jiffies
1314 *
1315 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1316 * current thread will be put to sleep until the specified event occurs or
1317 * until the given timeout is reached.
1318 *
1319 * Returns: zero on success, or a negative error code on failure.
1320 */
1321int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1322			  unsigned long timeout)
1323{
1324	enum ice_aq_task_state *state = &task->state;
1325	struct device *dev = ice_pf_to_dev(pf);
1326	unsigned long start = jiffies;
1327	long ret;
1328	int err;
1329
1330	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
1331					       *state != ICE_AQ_TASK_WAITING,
1332					       timeout);
1333	switch (*state) {
1334	case ICE_AQ_TASK_NOT_PREPARED:
1335		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
1336		err = -EINVAL;
1337		break;
1338	case ICE_AQ_TASK_WAITING:
1339		err = ret < 0 ? ret : -ETIMEDOUT;
1340		break;
1341	case ICE_AQ_TASK_CANCELED:
1342		err = ret < 0 ? ret : -ECANCELED;
1343		break;
1344	case ICE_AQ_TASK_COMPLETE:
1345		err = ret < 0 ? ret : 0;
1346		break;
1347	default:
1348		WARN(1, "Unexpected AdminQ wait task state %u", *state);
1349		err = -EINVAL;
1350		break;
1351	}
1352
1353	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1354		jiffies_to_msecs(jiffies - start),
1355		jiffies_to_msecs(timeout),
1356		task->opcode);
1357
1358	spin_lock_bh(&pf->aq_wait_lock);
1359	hlist_del(&task->entry);
1360	spin_unlock_bh(&pf->aq_wait_lock);
1361
1362	return err;
1363}
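
/* [Editor's note] A minimal usage sketch of the two-step wait API above.
 * Registering the wait before issuing the command closes the race
 * described in the ice_aq_prep_for_event() comment. The command-send call
 * is a hypothetical stand-in, not a specific driver function.
 */
#if 0
	struct ice_aq_task task = {};
	int err;

	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_get_link_status);

	err = example_send_admin_command(pf);	/* hypothetical */
	if (!err)
		err = ice_aq_wait_for_event(pf, &task, HZ);
#endif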
1364
1365/**
1366 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1367 * @pf: pointer to the PF private structure
1368 * @opcode: the opcode of the event
1369 * @event: the event to check
1370 *
1371 * Loops over the current list of pending threads waiting for an AdminQ event.
1372 * For each matching task, copy the contents of the event into the task
1373 * structure and wake up the thread.
1374 *
1375 * If multiple threads wait for the same opcode, they will all be woken up.
1376 *
1377 * Note that event->msg_buf will only be duplicated if the event has a buffer
1378 * with enough space already allocated. Otherwise, only the descriptor and
1379 * message length will be copied.
1380 *
1381 * Any matching task is marked ICE_AQ_TASK_COMPLETE and its thread is woken up.
1382 */
1383static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1384				struct ice_rq_event_info *event)
1385{
1386	struct ice_rq_event_info *task_ev;
1387	struct ice_aq_task *task;
1388	bool found = false;
1389
1390	spin_lock_bh(&pf->aq_wait_lock);
1391	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1392		if (task->state != ICE_AQ_TASK_WAITING)
1393			continue;
1394		if (task->opcode != opcode)
1395			continue;
1396
1397		task_ev = &task->event;
1398		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
1399		task_ev->msg_len = event->msg_len;
1400
1401		/* Only copy the data buffer if a destination was set */
1402		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
1403			memcpy(task_ev->msg_buf, event->msg_buf,
1404			       event->buf_len);
1405			task_ev->buf_len = event->buf_len;
1406		}
1407
1408		task->state = ICE_AQ_TASK_COMPLETE;
1409		found = true;
1410	}
1411	spin_unlock_bh(&pf->aq_wait_lock);
1412
1413	if (found)
1414		wake_up(&pf->aq_wait_queue);
1415}
1416
1417/**
1418 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1419 * @pf: the PF private structure
1420 *
1421 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1422 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1423 */
1424static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1425{
1426	struct ice_aq_task *task;
1427
1428	spin_lock_bh(&pf->aq_wait_lock);
1429	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1430		task->state = ICE_AQ_TASK_CANCELED;
1431	spin_unlock_bh(&pf->aq_wait_lock);
1432
1433	wake_up(&pf->aq_wait_queue);
1434}
1435
1436#define ICE_MBX_OVERFLOW_WATERMARK 64
1437
1438/**
1439 * __ice_clean_ctrlq - helper function to clean controlq rings
1440 * @pf: ptr to struct ice_pf
1441 * @q_type: specific Control queue type
1442 */
1443static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1444{
1445	struct device *dev = ice_pf_to_dev(pf);
1446	struct ice_rq_event_info event;
1447	struct ice_hw *hw = &pf->hw;
1448	struct ice_ctl_q_info *cq;
1449	u16 pending, i = 0;
1450	const char *qtype;
1451	u32 oldval, val;
1452
1453	/* Do not clean control queue if/when PF reset fails */
1454	if (test_bit(ICE_RESET_FAILED, pf->state))
1455		return 0;
1456
1457	switch (q_type) {
1458	case ICE_CTL_Q_ADMIN:
1459		cq = &hw->adminq;
1460		qtype = "Admin";
1461		break;
1462	case ICE_CTL_Q_SB:
1463		cq = &hw->sbq;
1464		qtype = "Sideband";
1465		break;
1466	case ICE_CTL_Q_MAILBOX:
1467		cq = &hw->mailboxq;
1468		qtype = "Mailbox";
1469		/* we are going to try to detect a malicious VF, so set the
1470		 * state to begin detection
1471		 */
1472		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1473		break;
1474	default:
1475		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1476		return 0;
1477	}
1478
1479	/* check for error indications - PF_xx_AxQLEN register layout for
1480	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1481	 */
1482	val = rd32(hw, cq->rq.len);
1483	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1484		   PF_FW_ARQLEN_ARQCRIT_M)) {
1485		oldval = val;
1486		if (val & PF_FW_ARQLEN_ARQVFE_M)
1487			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1488				qtype);
1489		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1490			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1491				qtype);
1492		}
1493		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1494			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1495				qtype);
1496		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1497			 PF_FW_ARQLEN_ARQCRIT_M);
1498		if (oldval != val)
1499			wr32(hw, cq->rq.len, val);
1500	}
1501
1502	val = rd32(hw, cq->sq.len);
1503	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1504		   PF_FW_ATQLEN_ATQCRIT_M)) {
1505		oldval = val;
1506		if (val & PF_FW_ATQLEN_ATQVFE_M)
1507			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1508				qtype);
1509		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1510			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1511				qtype);
1512		}
1513		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1514			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1515				qtype);
1516		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1517			 PF_FW_ATQLEN_ATQCRIT_M);
1518		if (oldval != val)
1519			wr32(hw, cq->sq.len, val);
1520	}
1521
1522	event.buf_len = cq->rq_buf_size;
1523	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1524	if (!event.msg_buf)
1525		return 0;
1526
1527	do {
1528		struct ice_mbx_data data = {};
1529		u16 opcode;
1530		int ret;
1531
1532		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1533		if (ret == -EALREADY)
1534			break;
1535		if (ret) {
1536			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1537				ret);
1538			break;
1539		}
1540
1541		opcode = le16_to_cpu(event.desc.opcode);
1542
1543		/* Notify any thread that might be waiting for this event */
1544		ice_aq_check_events(pf, opcode, &event);
1545
1546		switch (opcode) {
1547		case ice_aqc_opc_get_link_status:
1548			if (ice_handle_link_event(pf, &event))
1549				dev_err(dev, "Could not handle link event\n");
1550			break;
1551		case ice_aqc_opc_event_lan_overflow:
1552			ice_vf_lan_overflow_event(pf, &event);
1553			break;
1554		case ice_mbx_opc_send_msg_to_pf:
1555			data.num_msg_proc = i;
1556			data.num_pending_arq = pending;
1557			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
1558			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
1559
1560			ice_vc_process_vf_msg(pf, &event, &data);
1561			break;
1562		case ice_aqc_opc_fw_logs_event:
1563			ice_get_fwlog_data(pf, &event);
1564			break;
1565		case ice_aqc_opc_lldp_set_mib_change:
1566			ice_dcb_process_lldp_set_mib_change(pf, &event);
1567			break;
1568		default:
1569			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1570				qtype, opcode);
1571			break;
1572		}
1573	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1574
1575	kfree(event.msg_buf);
1576
1577	return pending && (i == ICE_DFLT_IRQ_WORK);
1578}
1579
1580/**
1581 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1582 * @hw: pointer to hardware info
1583 * @cq: control queue information
1584 *
1585 * returns true if there are pending messages in a queue, false if there aren't
1586 */
1587static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1588{
1589	u16 ntu;
1590
1591	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1592	return cq->rq.next_to_clean != ntu;
1593}
1594
1595/**
1596 * ice_clean_adminq_subtask - clean the AdminQ rings
1597 * @pf: board private structure
1598 */
1599static void ice_clean_adminq_subtask(struct ice_pf *pf)
1600{
1601	struct ice_hw *hw = &pf->hw;
1602
1603	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1604		return;
1605
1606	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1607		return;
1608
1609	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1610
1611	/* There might be a situation where new messages arrive to a control
1612	 * queue between processing the last message and clearing the
1613	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1614	 * ice_ctrlq_pending) and process new messages if any.
1615	 */
1616	if (ice_ctrlq_pending(hw, &hw->adminq))
1617		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1618
1619	ice_flush(hw);
1620}
1621
1622/**
1623 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1624 * @pf: board private structure
1625 */
1626static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1627{
1628	struct ice_hw *hw = &pf->hw;
1629
1630	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1631		return;
1632
1633	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1634		return;
1635
1636	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1637
1638	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1639		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1640
1641	ice_flush(hw);
1642}
1643
1644/**
1645 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1646 * @pf: board private structure
1647 */
1648static void ice_clean_sbq_subtask(struct ice_pf *pf)
1649{
1650	struct ice_hw *hw = &pf->hw;
1651
1652	/* if mac_type is not generic, sideband is not supported
1653	 * and there's nothing to do here
1654	 */
1655	if (!ice_is_generic_mac(hw)) {
1656		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1657		return;
1658	}
1659
1660	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1661		return;
1662
1663	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1664		return;
1665
1666	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1667
1668	if (ice_ctrlq_pending(hw, &hw->sbq))
1669		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1670
1671	ice_flush(hw);
1672}
1673
1674/**
1675 * ice_service_task_schedule - schedule the service task to wake up
1676 * @pf: board private structure
1677 *
1678 * If not already scheduled, this puts the task into the work queue.
1679 */
1680void ice_service_task_schedule(struct ice_pf *pf)
1681{
1682	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1683	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1684	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1685		queue_work(ice_wq, &pf->serv_task);
1686}
1687
1688/**
1689 * ice_service_task_complete - finish up the service task
1690 * @pf: board private structure
1691 */
1692static void ice_service_task_complete(struct ice_pf *pf)
1693{
1694	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1695
1696	/* force memory (pf->state) to sync before next service task */
1697	smp_mb__before_atomic();
1698	clear_bit(ICE_SERVICE_SCHED, pf->state);
1699}
1700
1701/**
1702 * ice_service_task_stop - stop service task and cancel works
1703 * @pf: board private structure
1704 *
1705 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1706 * 1 otherwise.
1707 */
1708static int ice_service_task_stop(struct ice_pf *pf)
1709{
1710	int ret;
1711
1712	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1713
1714	if (pf->serv_tmr.function)
1715		del_timer_sync(&pf->serv_tmr);
1716	if (pf->serv_task.func)
1717		cancel_work_sync(&pf->serv_task);
1718
1719	clear_bit(ICE_SERVICE_SCHED, pf->state);
1720	return ret;
1721}
1722
1723/**
1724 * ice_service_task_restart - restart service task and schedule works
1725 * @pf: board private structure
1726 *
1727 * This function is needed for the suspend and resume flows (e.g. the WoL scenario)
1728 */
1729static void ice_service_task_restart(struct ice_pf *pf)
1730{
1731	clear_bit(ICE_SERVICE_DIS, pf->state);
1732	ice_service_task_schedule(pf);
1733}
1734
1735/**
1736 * ice_service_timer - timer callback to schedule service task
1737 * @t: pointer to timer_list
1738 */
1739static void ice_service_timer(struct timer_list *t)
1740{
1741	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1742
1743	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1744	ice_service_task_schedule(pf);
1745}
1746
1747/**
1748 * ice_handle_mdd_event - handle malicious driver detect event
1749 * @pf: pointer to the PF structure
1750 *
1751 * Called from service task. OICR interrupt handler indicates MDD event.
1752 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1753 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1754 * disable the queue, the PF can be configured to reset the VF using ethtool
1755 * private flag mdd-auto-reset-vf.
1756 */
1757static void ice_handle_mdd_event(struct ice_pf *pf)
1758{
1759	struct device *dev = ice_pf_to_dev(pf);
1760	struct ice_hw *hw = &pf->hw;
1761	struct ice_vf *vf;
1762	unsigned int bkt;
1763	u32 reg;
1764
1765	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1766		/* Since the VF MDD event logging is rate limited, check if
1767		 * there are pending MDD events.
1768		 */
1769		ice_print_vfs_mdd_events(pf);
1770		return;
1771	}
1772
1773	/* find what triggered an MDD event */
1774	reg = rd32(hw, GL_MDET_TX_PQM);
1775	if (reg & GL_MDET_TX_PQM_VALID_M) {
1776		u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1777		u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1778		u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1779		u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1780
1781		if (netif_msg_tx_err(pf))
1782			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1783				 event, queue, pf_num, vf_num);
1784		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1785	}
1786
1787	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1788	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1789		u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1790		u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1791		u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1792		u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1793
1794		if (netif_msg_tx_err(pf))
1795			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1796				 event, queue, pf_num, vf_num);
1797		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1798	}
1799
1800	reg = rd32(hw, GL_MDET_RX);
1801	if (reg & GL_MDET_RX_VALID_M) {
1802		u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1803		u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1804		u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1805		u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1806
1807		if (netif_msg_rx_err(pf))
1808			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1809				 event, queue, pf_num, vf_num);
1810		wr32(hw, GL_MDET_RX, 0xffffffff);
1811	}
1812
1813	/* check to see if this PF caused an MDD event */
1814	reg = rd32(hw, PF_MDET_TX_PQM);
1815	if (reg & PF_MDET_TX_PQM_VALID_M) {
1816		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1817		if (netif_msg_tx_err(pf))
1818			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1819	}
1820
1821	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1822	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1823		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1824		if (netif_msg_tx_err(pf))
1825			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1826	}
1827
1828	reg = rd32(hw, PF_MDET_RX);
1829	if (reg & PF_MDET_RX_VALID_M) {
1830		wr32(hw, PF_MDET_RX, 0xFFFF);
1831		if (netif_msg_rx_err(pf))
1832			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1833	}
1834
1835	/* Check to see if one of the VFs caused an MDD event, and then
1836	 * increment counters and set print pending
1837	 */
1838	mutex_lock(&pf->vfs.table_lock);
1839	ice_for_each_vf(pf, bkt, vf) {
1840		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1841		if (reg & VP_MDET_TX_PQM_VALID_M) {
1842			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1843			vf->mdd_tx_events.count++;
1844			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1845			if (netif_msg_tx_err(pf))
1846				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1847					 vf->vf_id);
1848		}
1849
1850		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1851		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1852			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1853			vf->mdd_tx_events.count++;
1854			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1855			if (netif_msg_tx_err(pf))
1856				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1857					 vf->vf_id);
1858		}
1859
1860		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1861		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1862			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1863			vf->mdd_tx_events.count++;
1864			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1865			if (netif_msg_tx_err(pf))
1866				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1867					 vf->vf_id);
1868		}
1869
1870		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1871		if (reg & VP_MDET_RX_VALID_M) {
1872			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1873			vf->mdd_rx_events.count++;
1874			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1875			if (netif_msg_rx_err(pf))
1876				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1877					 vf->vf_id);
1878
1879			/* Since the queue is disabled on VF Rx MDD events, the
1880			 * PF can be configured to reset the VF through ethtool
1881			 * private flag mdd-auto-reset-vf.
1882			 */
1883			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1884				/* VF MDD event counters will be cleared by
1885				 * reset, so print the event prior to reset.
1886				 */
1887				ice_print_vf_rx_mdd_event(vf);
1888				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1889			}
1890		}
1891	}
1892	mutex_unlock(&pf->vfs.table_lock);
1893
1894	ice_print_vfs_mdd_events(pf);
1895}
1896
1897/**
1898 * ice_force_phys_link_state - Force the physical link state
1899 * @vsi: VSI to force the physical link state to up/down
1900 * @link_up: true/false indicates to set the physical link to up/down
1901 *
1902 * Force the physical link state by getting the current PHY capabilities from
1903 * hardware and setting the PHY config based on the determined capabilities. If
1904 * link changes a link event will be triggered because both the Enable Automatic
1905 * the link changes, a link event will be triggered because both the Enable Automatic
1906 *
1907 * Returns 0 on success, negative on failure
1908 */
1909static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1910{
1911	struct ice_aqc_get_phy_caps_data *pcaps;
1912	struct ice_aqc_set_phy_cfg_data *cfg;
1913	struct ice_port_info *pi;
1914	struct device *dev;
1915	int retcode;
1916
1917	if (!vsi || !vsi->port_info || !vsi->back)
1918		return -EINVAL;
1919	if (vsi->type != ICE_VSI_PF)
1920		return 0;
1921
1922	dev = ice_pf_to_dev(vsi->back);
1923
1924	pi = vsi->port_info;
1925
1926	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1927	if (!pcaps)
1928		return -ENOMEM;
1929
1930	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1931				      NULL);
1932	if (retcode) {
1933		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1934			vsi->vsi_num, retcode);
1935		retcode = -EIO;
1936		goto out;
1937	}
1938
1939	/* No change in link */
1940	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1941	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1942		goto out;
1943
1944	/* Use the current user PHY configuration, which is initialized during
1945	 * probe from the PHY capabilities (software mode) and updated on each
1946	 * set PHY configuration.
1947	 */
1948	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1949	if (!cfg) {
1950		retcode = -ENOMEM;
1951		goto out;
1952	}
1953
1954	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1955	if (link_up)
1956		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1957	else
1958		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1959
1960	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1961	if (retcode) {
1962		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1963			vsi->vsi_num, retcode);
1964		retcode = -EIO;
1965	}
1966
1967	kfree(cfg);
1968out:
1969	kfree(pcaps);
1970	return retcode;
1971}
1972
1973/**
1974 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1975 * @pi: port info structure
1976 *
1977 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1978 */
1979static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1980{
1981	struct ice_aqc_get_phy_caps_data *pcaps;
1982	struct ice_pf *pf = pi->hw->back;
1983	int err;
1984
1985	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1986	if (!pcaps)
1987		return -ENOMEM;
1988
1989	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1990				  pcaps, NULL);
1991
1992	if (err) {
1993		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1994		goto out;
1995	}
1996
1997	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1998	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1999
2000out:
2001	kfree(pcaps);
2002	return err;
2003}
2004
2005/**
2006 * ice_init_link_dflt_override - Initialize link default override
2007 * @pi: port info structure
2008 *
2009 * Initialize link default override and PHY total port shutdown during probe
2010 */
2011static void ice_init_link_dflt_override(struct ice_port_info *pi)
2012{
2013	struct ice_link_default_override_tlv *ldo;
2014	struct ice_pf *pf = pi->hw->back;
2015
2016	ldo = &pf->link_dflt_override;
2017	if (ice_get_link_default_override(ldo, pi))
2018		return;
2019
2020	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2021		return;
2022
2023	/* Enable Total Port Shutdown (override/replace link-down-on-close
2024	 * ethtool private flag) for ports with Port Disable bit set.
2025	 */
2026	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2027	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2028}
2029
2030/**
2031 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2032 * @pi: port info structure
2033 *
2034 * If default override is enabled, initialize the user PHY cfg speed and FEC
2035 * settings using the default override mask from the NVM.
2036 *
2037 * The PHY should only be configured with the default override settings the
2038 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2039 * is used to indicate that the user PHY cfg default override is initialized
2040 * and the PHY has not been configured with the default override settings. The
2041 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2042 * configured.
2043 *
2044 * This function should be called only if the FW doesn't support default
2045 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2046 */
2047static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2048{
2049	struct ice_link_default_override_tlv *ldo;
2050	struct ice_aqc_set_phy_cfg_data *cfg;
2051	struct ice_phy_info *phy = &pi->phy;
2052	struct ice_pf *pf = pi->hw->back;
2053
2054	ldo = &pf->link_dflt_override;
2055
2056	/* If link default override is enabled, use it to mask the NVM PHY
2057	 * capabilities for the speed and FEC default configuration.
2058	 */
2059	cfg = &phy->curr_user_phy_cfg;
2060
2061	if (ldo->phy_type_low || ldo->phy_type_high) {
2062		cfg->phy_type_low = pf->nvm_phy_type_lo &
2063				    cpu_to_le64(ldo->phy_type_low);
2064		cfg->phy_type_high = pf->nvm_phy_type_hi &
2065				     cpu_to_le64(ldo->phy_type_high);
2066	}
2067	cfg->link_fec_opt = ldo->fec_options;
2068	phy->curr_user_fec_req = ICE_FEC_AUTO;
2069
2070	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2071}
2072
2073/**
2074 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2075 * @pi: port info structure
2076 *
2077 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2078 * mode to default. The PHY defaults come from the get PHY capabilities
2079 * (topology with media) response, so call this when media is first available.
2080 * An error is returned if called when media is not available. The PHY
2081 * initialization completed state is set here.
2082 *
2083 * These values are used when setting the PHY configuration, and the user PHY
2084 * configuration is updated on each set PHY configuration.
2085 * Returns 0 on success, negative on failure
2086 */
2087static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2088{
2089	struct ice_aqc_get_phy_caps_data *pcaps;
2090	struct ice_phy_info *phy = &pi->phy;
2091	struct ice_pf *pf = pi->hw->back;
2092	int err;
2093
2094	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2095		return -EIO;
2096
2097	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2098	if (!pcaps)
2099		return -ENOMEM;
2100
2101	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2102		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2103					  pcaps, NULL);
2104	else
2105		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2106					  pcaps, NULL);
2107	if (err) {
2108		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2109		goto err_out;
2110	}
2111
2112	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2113
2114	/* check if lenient mode is supported and enabled */
2115	if (ice_fw_supports_link_override(pi->hw) &&
2116	    !(pcaps->module_compliance_enforcement &
2117	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2118		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2119
2120		/* if the FW supports default PHY configuration mode, then the driver
2121		 * does not have to apply link override settings. If not,
2122		 * initialize user PHY configuration with link override values
2123		 */
2124		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2125		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2126			ice_init_phy_cfg_dflt_override(pi);
2127			goto out;
2128		}
2129	}
2130
2131	/* if link default override is not enabled, set user flow control and
2132	 * FEC settings based on what get_phy_caps returned
2133	 */
2134	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2135						      pcaps->link_fec_options);
2136	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2137
2138out:
2139	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2140	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2141err_out:
2142	kfree(pcaps);
2143	return err;
2144}
2145
2146/**
2147 * ice_configure_phy - configure PHY
2148 * @vsi: VSI of PHY
2149 *
2150 * Set the PHY configuration. If the current PHY configuration is the same as
2151 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
2152 * configure based on the PHY capabilities reported for the topology with media.
2153 */
2154static int ice_configure_phy(struct ice_vsi *vsi)
2155{
2156	struct device *dev = ice_pf_to_dev(vsi->back);
2157	struct ice_port_info *pi = vsi->port_info;
2158	struct ice_aqc_get_phy_caps_data *pcaps;
2159	struct ice_aqc_set_phy_cfg_data *cfg;
2160	struct ice_phy_info *phy = &pi->phy;
2161	struct ice_pf *pf = vsi->back;
2162	int err;
2163
2164	/* Ensure we have media as we cannot configure a medialess port */
2165	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2166		return -ENOMEDIUM;
2167
2168	ice_print_topo_conflict(vsi);
2169
2170	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2171	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2172		return -EPERM;
2173
2174	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2175		return ice_force_phys_link_state(vsi, true);
2176
2177	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2178	if (!pcaps)
2179		return -ENOMEM;
2180
2181	/* Get current PHY config */
2182	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2183				  NULL);
2184	if (err) {
2185		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2186			vsi->vsi_num, err);
2187		goto done;
2188	}
2189
2190	/* If PHY enable link is configured and configuration has not changed,
2191	 * there's nothing to do
2192	 */
2193	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2194	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2195		goto done;
2196
2197	/* Use PHY topology as baseline for configuration */
2198	memset(pcaps, 0, sizeof(*pcaps));
2199	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2200		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2201					  pcaps, NULL);
2202	else
2203		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2204					  pcaps, NULL);
2205	if (err) {
2206		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2207			vsi->vsi_num, err);
2208		goto done;
2209	}
2210
2211	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2212	if (!cfg) {
2213		err = -ENOMEM;
2214		goto done;
2215	}
2216
2217	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2218
2219	/* Speed - If a default override is pending, use the curr_user_phy_cfg
2220	 * set in ice_init_phy_cfg_dflt_override.
2221	 */
2222	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2223			       vsi->back->state)) {
2224		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2225		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2226	} else {
2227		u64 phy_low = 0, phy_high = 0;
2228
2229		ice_update_phy_type(&phy_low, &phy_high,
2230				    pi->phy.curr_user_speed_req);
2231		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2232		cfg->phy_type_high = pcaps->phy_type_high &
2233				     cpu_to_le64(phy_high);
2234	}
2235
2236	/* Can't provide what was requested; use PHY capabilities */
2237	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2238		cfg->phy_type_low = pcaps->phy_type_low;
2239		cfg->phy_type_high = pcaps->phy_type_high;
2240	}
2241
2242	/* FEC */
2243	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2244
2245	/* Can't provide what was requested; use PHY capabilities */
2246	if (cfg->link_fec_opt !=
2247	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2248		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2249		cfg->link_fec_opt = pcaps->link_fec_options;
2250	}
2251
2252	/* Flow Control - always supported; no need to check against
2253	 * capabilities
2254	 */
2255	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2256
2257	/* Enable link and link update */
2258	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2259
2260	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2261	if (err)
2262		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2263			vsi->vsi_num, err);
2264
2265	kfree(cfg);
2266done:
2267	kfree(pcaps);
2268	return err;
2269}
2270
2271/**
2272 * ice_check_media_subtask - Check for media
2273 * @pf: pointer to PF struct
2274 *
2275 * If media is available, initialize the PHY user configuration if it has not
2276 * been done yet, and configure the PHY if the interface is up.
2277 */
2278static void ice_check_media_subtask(struct ice_pf *pf)
2279{
2280	struct ice_port_info *pi;
2281	struct ice_vsi *vsi;
2282	int err;
2283
2284	/* No need to check for media if it's already present */
2285	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2286		return;
2287
2288	vsi = ice_get_main_vsi(pf);
2289	if (!vsi)
2290		return;
2291
2292	/* Refresh link info and check if media is present */
2293	pi = vsi->port_info;
2294	err = ice_update_link_info(pi);
2295	if (err)
2296		return;
2297
2298	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2299
2300	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2301		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2302			ice_init_phy_user_cfg(pi);
2303
2304		/* PHY settings are reset on media insertion, reconfigure
2305		 * PHY to preserve settings.
2306		 */
2307		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2308		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2309			return;
2310
2311		err = ice_configure_phy(vsi);
2312		if (!err)
2313			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2314
2315		/* A Link Status Event will be generated; the event handler
2316		 * will complete bringing the interface up
2317		 */
2318	}
2319}
2320
2321/**
2322 * ice_service_task - manage and run subtasks
2323 * @work: pointer to work_struct contained by the PF struct
2324 */
2325static void ice_service_task(struct work_struct *work)
2326{
2327	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2328	unsigned long start_time = jiffies;
2329
2330	/* subtasks */
2331
2332	/* process reset requests first */
2333	ice_reset_subtask(pf);
2334
2335	/* bail if a reset/recovery cycle is pending or rebuild failed */
2336	if (ice_is_reset_in_progress(pf->state) ||
2337	    test_bit(ICE_SUSPENDED, pf->state) ||
2338	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2339		ice_service_task_complete(pf);
2340		return;
2341	}
2342
2343	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2344		struct iidc_event *event;
2345
2346		event = kzalloc(sizeof(*event), GFP_KERNEL);
2347		if (event) {
2348			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2349			/* report the entire OICR value to AUX driver */
2350			swap(event->reg, pf->oicr_err_reg);
2351			ice_send_event_to_aux(pf, event);
2352			kfree(event);
2353		}
2354	}
2355
2356	/* Unplug the aux device per request. If an unplug request came in
2357	 * while processing a plug request, this will handle it.
2358	 */
2359	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2360		ice_unplug_aux_dev(pf);
2361
2362	/* Plug aux device per request */
2363	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2364		ice_plug_aux_dev(pf);
2365
2366	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2367		struct iidc_event *event;
2368
2369		event = kzalloc(sizeof(*event), GFP_KERNEL);
2370		if (event) {
2371			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2372			ice_send_event_to_aux(pf, event);
2373			kfree(event);
2374		}
2375	}
2376
2377	ice_clean_adminq_subtask(pf);
2378	ice_check_media_subtask(pf);
2379	ice_check_for_hang_subtask(pf);
2380	ice_sync_fltr_subtask(pf);
2381	ice_handle_mdd_event(pf);
2382	ice_watchdog_subtask(pf);
2383
2384	if (ice_is_safe_mode(pf)) {
2385		ice_service_task_complete(pf);
2386		return;
2387	}
2388
2389	ice_process_vflr_event(pf);
2390	ice_clean_mailboxq_subtask(pf);
2391	ice_clean_sbq_subtask(pf);
2392	ice_sync_arfs_fltrs(pf);
2393	ice_flush_fdir_ctx(pf);
2394
2395	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2396	ice_service_task_complete(pf);
2397
2398	/* If the tasks have taken longer than one service timer period
2399	 * or there is more work to be done, reset the service timer to
2400	 * schedule the service task now.
2401	 */
2402	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2403	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2404	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2405	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2406	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2407	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2408	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2409		mod_timer(&pf->serv_tmr, jiffies);
2410}
2411
2412/**
2413 * ice_set_ctrlq_len - helper function to set controlq length
2414 * @hw: pointer to the HW instance
2415 */
2416static void ice_set_ctrlq_len(struct ice_hw *hw)
2417{
2418	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2419	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2420	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2421	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
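	/* size the mailbox receive queue to the maximum count the ARQLEN
	 * register field can hold (PF_MBX_ARQLEN_ARQLEN_M is the field mask)
	 */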
2422	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2423	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2424	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2425	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2426	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2427	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2428	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2429	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2430}
2431
2432/**
2433 * ice_schedule_reset - schedule a reset
2434 * @pf: board private structure
2435 * @reset: reset being requested
2436 */
2437int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2438{
2439	struct device *dev = ice_pf_to_dev(pf);
2440
2441	/* bail out if earlier reset has failed */
2442	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2443		dev_dbg(dev, "earlier reset has failed\n");
2444		return -EIO;
2445	}
2446	/* bail if reset/recovery already in progress */
2447	if (ice_is_reset_in_progress(pf->state)) {
2448		dev_dbg(dev, "Reset already in progress\n");
2449		return -EBUSY;
2450	}
2451
2452	switch (reset) {
2453	case ICE_RESET_PFR:
2454		set_bit(ICE_PFR_REQ, pf->state);
2455		break;
2456	case ICE_RESET_CORER:
2457		set_bit(ICE_CORER_REQ, pf->state);
2458		break;
2459	case ICE_RESET_GLOBR:
2460		set_bit(ICE_GLOBR_REQ, pf->state);
2461		break;
2462	default:
2463		return -EINVAL;
2464	}
2465
2466	ice_service_task_schedule(pf);
2467	return 0;
2468}
2469
2470/**
2471 * ice_irq_affinity_notify - Callback for affinity changes
2472 * @notify: context as to what irq was changed
2473 * @mask: the new affinity mask
2474 *
2475 * This is a callback function used by the irq_set_affinity_notifier function
2476 * so that we may register to receive changes to the irq affinity masks.
2477 */
2478static void
2479ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2480			const cpumask_t *mask)
2481{
2482	struct ice_q_vector *q_vector =
2483		container_of(notify, struct ice_q_vector, affinity_notify);
2484
2485	cpumask_copy(&q_vector->affinity_mask, mask);
2486}
2487
2488/**
2489 * ice_irq_affinity_release - Callback for affinity notifier release
2490 * @ref: internal core kernel usage
2491 *
2492 * This is a callback function used by the irq_set_affinity_notifier function
2493 * to inform the current notification subscriber that they will no longer
2494 * receive notifications.
2495 */
2496static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2497
2498/**
2499 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2500 * @vsi: the VSI being configured
2501 */
2502static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2503{
2504	struct ice_hw *hw = &vsi->back->hw;
2505	int i;
2506
2507	ice_for_each_q_vector(vsi, i)
2508		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2509
2510	ice_flush(hw);
2511	return 0;
2512}
2513
2514/**
2515 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2516 * @vsi: the VSI being configured
2517 * @basename: name for the vector
2518 */
2519static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2520{
2521	int q_vectors = vsi->num_q_vectors;
2522	struct ice_pf *pf = vsi->back;
2523	struct device *dev;
2524	int rx_int_idx = 0;
2525	int tx_int_idx = 0;
2526	int vector, err;
2527	int irq_num;
2528
2529	dev = ice_pf_to_dev(pf);
2530	for (vector = 0; vector < q_vectors; vector++) {
2531		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2532
2533		irq_num = q_vector->irq.virq;
2534
2535		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2536			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2537				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2538			tx_int_idx++;
2539		} else if (q_vector->rx.rx_ring) {
2540			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2541				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2542		} else if (q_vector->tx.tx_ring) {
2543			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2544				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2545		} else {
2546			/* skip this unused q_vector */
2547			continue;
2548		}
2549		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2550			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2551					       IRQF_SHARED, q_vector->name,
2552					       q_vector);
2553		else
2554			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2555					       0, q_vector->name, q_vector);
2556		if (err) {
2557			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2558				   err);
2559			goto free_q_irqs;
2560		}
2561
2562		/* register for affinity change notifications */
2563		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2564			struct irq_affinity_notify *affinity_notify;
2565
2566			affinity_notify = &q_vector->affinity_notify;
2567			affinity_notify->notify = ice_irq_affinity_notify;
2568			affinity_notify->release = ice_irq_affinity_release;
2569			irq_set_affinity_notifier(irq_num, affinity_notify);
2570		}
2571
2572		/* assign the mask for this irq */
2573		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2574	}
2575
2576	err = ice_set_cpu_rx_rmap(vsi);
2577	if (err) {
2578		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2579			   vsi->vsi_num, ERR_PTR(err));
2580		goto free_q_irqs;
2581	}
2582
2583	vsi->irqs_ready = true;
2584	return 0;
2585
2586free_q_irqs:
2587	while (vector--) {
2588		irq_num = vsi->q_vectors[vector]->irq.virq;
2589		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2590			irq_set_affinity_notifier(irq_num, NULL);
2591		irq_set_affinity_hint(irq_num, NULL);
2592		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2593	}
2594	return err;
2595}
2596
2597/**
2598 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2599 * @vsi: VSI to setup Tx rings used by XDP
2600 *
2601 * Return 0 on success and negative value on error
2602 */
2603static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2604{
2605	struct device *dev = ice_pf_to_dev(vsi->back);
2606	struct ice_tx_desc *tx_desc;
2607	int i, j;
2608
2609	ice_for_each_xdp_txq(vsi, i) {
2610		u16 xdp_q_idx = vsi->alloc_txq + i;
2611		struct ice_ring_stats *ring_stats;
2612		struct ice_tx_ring *xdp_ring;
2613
2614		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2615		if (!xdp_ring)
2616			goto free_xdp_rings;
2617
2618		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2619		if (!ring_stats) {
2620			ice_free_tx_ring(xdp_ring);
2621			goto free_xdp_rings;
2622		}
2623
2624		xdp_ring->ring_stats = ring_stats;
2625		xdp_ring->q_index = xdp_q_idx;
2626		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2627		xdp_ring->vsi = vsi;
2628		xdp_ring->netdev = NULL;
2629		xdp_ring->dev = dev;
2630		xdp_ring->count = vsi->num_tx_desc;
2631		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2632		if (ice_setup_tx_ring(xdp_ring))
2633			goto free_xdp_rings;
2634		ice_set_ring_xdp(xdp_ring);
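		/* tx_lock serializes access when XDP Tx rings are shared
		 * between CPUs (see ice_xdp_locking_key)
		 */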
2635		spin_lock_init(&xdp_ring->tx_lock);
2636		for (j = 0; j < xdp_ring->count; j++) {
2637			tx_desc = ICE_TX_DESC(xdp_ring, j);
2638			tx_desc->cmd_type_offset_bsz = 0;
2639		}
2640	}
2641
2642	return 0;
2643
2644free_xdp_rings:
2645	for (; i >= 0; i--) {
2646		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2647			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2648			vsi->xdp_rings[i]->ring_stats = NULL;
2649			ice_free_tx_ring(vsi->xdp_rings[i]);
2650		}
2651	}
2652	return -ENOMEM;
2653}
2654
2655/**
2656 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2657 * @vsi: VSI to set the bpf prog on
2658 * @prog: the bpf prog pointer
2659 */
2660static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2661{
2662	struct bpf_prog *old_prog;
2663	int i;
2664
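	/* atomically publish the new prog; readers in the hot path never
	 * observe a half-updated pointer
	 */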
2665	old_prog = xchg(&vsi->xdp_prog, prog);
2666	ice_for_each_rxq(vsi, i)
2667		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2668
2669	if (old_prog)
2670		bpf_prog_put(old_prog);
2671}
2672
2673/**
2674 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2675 * @vsi: VSI to bring up Tx rings used by XDP
2676 * @prog: bpf program that will be assigned to VSI
2677 *
2678 * Return 0 on success and negative value on error
2679 */
2680int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2681{
2682	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2683	int xdp_rings_rem = vsi->num_xdp_txq;
2684	struct ice_pf *pf = vsi->back;
2685	struct ice_qs_cfg xdp_qs_cfg = {
2686		.qs_mutex = &pf->avail_q_mutex,
2687		.pf_map = pf->avail_txqs,
2688		.pf_map_size = pf->max_pf_txqs,
2689		.q_count = vsi->num_xdp_txq,
2690		.scatter_count = ICE_MAX_SCATTER_TXQS,
2691		.vsi_map = vsi->txq_map,
2692		.vsi_map_offset = vsi->alloc_txq,
2693		.mapping_mode = ICE_VSI_MAP_CONTIG
2694	};
2695	struct device *dev;
2696	int i, v_idx;
2697	int status;
2698
2699	dev = ice_pf_to_dev(pf);
2700	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2701				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2702	if (!vsi->xdp_rings)
2703		return -ENOMEM;
2704
2705	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2706	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2707		goto err_map_xdp;
2708
2709	if (static_key_enabled(&ice_xdp_locking_key))
2710		netdev_warn(vsi->netdev,
2711			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2712
2713	if (ice_xdp_alloc_setup_rings(vsi))
2714		goto clear_xdp_rings;
2715
2716	/* follow the logic from ice_vsi_map_rings_to_vectors */
2717	ice_for_each_q_vector(vsi, v_idx) {
2718		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2719		int xdp_rings_per_v, q_id, q_base;
2720
2721		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2722					       vsi->num_q_vectors - v_idx);
2723		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2724
2725		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2726			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2727
2728			xdp_ring->q_vector = q_vector;
2729			xdp_ring->next = q_vector->tx.tx_ring;
2730			q_vector->tx.tx_ring = xdp_ring;
2731		}
2732		xdp_rings_rem -= xdp_rings_per_v;
2733	}
2734
2735	ice_for_each_rxq(vsi, i) {
2736		if (static_key_enabled(&ice_xdp_locking_key)) {
2737			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2738		} else {
2739			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2740			struct ice_tx_ring *ring;
2741
2742			ice_for_each_tx_ring(ring, q_vector->tx) {
2743				if (ice_ring_is_xdp(ring)) {
2744					vsi->rx_rings[i]->xdp_ring = ring;
2745					break;
2746				}
2747			}
2748		}
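		/* (re)assign the AF_XDP socket pool, if any, for this queue pair */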
2749		ice_tx_xsk_pool(vsi, i);
2750	}
2751
2752	/* omit the scheduler update if in reset path; XDP queues will be
2753	 * taken into account at the end of ice_vsi_rebuild, where
2754	 * ice_cfg_vsi_lan is being called
2755	 */
2756	if (ice_is_reset_in_progress(pf->state))
2757		return 0;
2758
2759	/* tell the Tx scheduler that right now we have
2760	 * additional queues
2761	 */
2762	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2763		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2764
2765	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2766				 max_txqs);
2767	if (status) {
2768		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2769			status);
2770		goto clear_xdp_rings;
2771	}
2772
2773	/* Assign the prog only when it's not already present on the VSI.
2774	 * This flow can be reached from both ethtool -L and ndo_bpf. A VSI
2775	 * rebuild under ethtool -L could expose us to bpf_prog refcount
2776	 * issues: we would swap in the same bpf_prog pointer already held
2777	 * in vsi->xdp_prog and then call bpf_prog_put on it, since it would
2778	 * be treated as an 'old_prog'. For ndo_bpf this is not harmful, as
2779	 * dev_xdp_install bumps the refcount before calling the op exposed
2780	 * by the driver.
2781	 */
2782	if (!ice_is_xdp_ena_vsi(vsi))
2783		ice_vsi_assign_bpf_prog(vsi, prog);
2784
2785	return 0;
2786clear_xdp_rings:
2787	ice_for_each_xdp_txq(vsi, i)
2788		if (vsi->xdp_rings[i]) {
2789			kfree_rcu(vsi->xdp_rings[i], rcu);
2790			vsi->xdp_rings[i] = NULL;
2791		}
2792
2793err_map_xdp:
2794	mutex_lock(&pf->avail_q_mutex);
2795	ice_for_each_xdp_txq(vsi, i) {
2796		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2797		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2798	}
2799	mutex_unlock(&pf->avail_q_mutex);
2800
2801	devm_kfree(dev, vsi->xdp_rings);
2802	return -ENOMEM;
2803}
2804
2805/**
2806 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2807 * @vsi: VSI to remove XDP rings from
2808 *
2809 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2810 * resources
2811 */
2812int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2813{
2814	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2815	struct ice_pf *pf = vsi->back;
2816	int i, v_idx;
2817
2818	/* q_vectors are freed in the reset path, so there's no point in
2819	 * detaching rings. If the rebuild was not triggered by a reset, the
2820	 * bits in pf->state won't be set, so additionally check the first
2821	 * q_vector against NULL.
2822	 */
2823	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2824		goto free_qmap;
2825
2826	ice_for_each_q_vector(vsi, v_idx) {
2827		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2828		struct ice_tx_ring *ring;
2829
2830		ice_for_each_tx_ring(ring, q_vector->tx)
2831			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2832				break;
2833
2834		/* restore the value of last node prior to XDP setup */
2835		q_vector->tx.tx_ring = ring;
2836	}
2837
2838free_qmap:
2839	mutex_lock(&pf->avail_q_mutex);
2840	ice_for_each_xdp_txq(vsi, i) {
2841		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2842		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2843	}
2844	mutex_unlock(&pf->avail_q_mutex);
2845
2846	ice_for_each_xdp_txq(vsi, i)
2847		if (vsi->xdp_rings[i]) {
2848			if (vsi->xdp_rings[i]->desc) {
2849				synchronize_rcu();
2850				ice_free_tx_ring(vsi->xdp_rings[i]);
2851			}
2852			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2853			vsi->xdp_rings[i]->ring_stats = NULL;
2854			kfree_rcu(vsi->xdp_rings[i], rcu);
2855			vsi->xdp_rings[i] = NULL;
2856		}
2857
2858	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2859	vsi->xdp_rings = NULL;
2860
2861	if (static_key_enabled(&ice_xdp_locking_key))
2862		static_branch_dec(&ice_xdp_locking_key);
2863
2864	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2865		return 0;
2866
2867	ice_vsi_assign_bpf_prog(vsi, NULL);
2868
2869	/* notify Tx scheduler that we destroyed XDP queues and bring
2870	 * back the old number of child nodes
2871	 */
2872	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2873		max_txqs[i] = vsi->num_txq;
2874
2875	/* change number of XDP Tx queues to 0 */
2876	vsi->num_xdp_txq = 0;
2877
2878	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2879			       max_txqs);
2880}
2881
2882/**
2883 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2884 * @vsi: VSI to schedule napi on
2885 */
2886static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2887{
2888	int i;
2889
2890	ice_for_each_rxq(vsi, i) {
2891		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2892
2893		if (rx_ring->xsk_pool)
2894			napi_schedule(&rx_ring->q_vector->napi);
2895	}
2896}
2897
2898/**
2899 * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2900 * @vsi: VSI to determine the count of XDP Tx queues for
2901 *
2902 * Returns 0 if the available Tx queue count is at least half the CPU count,
2903 * -ENOMEM otherwise
2904 */
2905int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2906{
2907	u16 avail = ice_get_avail_txq_count(vsi->back);
2908	u16 cpus = num_possible_cpus();
2909
2910	if (avail < cpus / 2)
2911		return -ENOMEM;
2912
2913	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2914
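	/* with fewer XDP Tx rings than CPUs, rings may be shared, so enable
	 * the static key that makes the XDP Tx path take the ring's tx_lock
	 */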
2915	if (vsi->num_xdp_txq < cpus)
2916		static_branch_inc(&ice_xdp_locking_key);
2917
2918	return 0;
2919}
2920
2921/**
2922 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2923 * @vsi: Pointer to VSI structure
2924 */
2925static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2926{
2927	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2928		return ICE_RXBUF_1664;
2929	else
2930		return ICE_RXBUF_3072;
2931}
2932
2933/**
2934 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2935 * @vsi: VSI to setup XDP for
2936 * @prog: XDP program
2937 * @extack: netlink extended ack
2938 */
2939static int
2940ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2941		   struct netlink_ext_ack *extack)
2942{
2943	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2944	bool if_running = netif_running(vsi->netdev);
2945	int ret = 0, xdp_ring_err = 0;
2946
2947	if (prog && !prog->aux->xdp_has_frags) {
2948		if (frame_size > ice_max_xdp_frame_size(vsi)) {
2949			NL_SET_ERR_MSG_MOD(extack,
2950					   "MTU is too large for linear frames and XDP prog does not support frags");
2951			return -EOPNOTSUPP;
2952		}
2953	}
2954
2955	/* hot swap progs and avoid toggling link */
2956	if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
2957		ice_vsi_assign_bpf_prog(vsi, prog);
2958		return 0;
2959	}
2960
2961	/* need to stop netdev while setting up the program for Rx rings */
2962	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2963		ret = ice_down(vsi);
2964		if (ret) {
2965			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2966			return ret;
2967		}
2968	}
2969
2970	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2971		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2972		if (xdp_ring_err) {
2973			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2974		} else {
2975			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2976			if (xdp_ring_err)
2977				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2978		}
2979		xdp_features_set_redirect_target(vsi->netdev, true);
2980		/* reallocate Rx queues that are used for zero-copy */
2981		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2982		if (xdp_ring_err)
2983			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2984	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2985		xdp_features_clear_redirect_target(vsi->netdev);
2986		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2987		if (xdp_ring_err)
2988			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2989		/* reallocate Rx queues that were used for zero-copy */
2990		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2991		if (xdp_ring_err)
2992			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2993	}
2994
2995	if (if_running)
2996		ret = ice_up(vsi);
2997
2998	if (!ret && prog)
2999		ice_vsi_rx_napi_schedule(vsi);
3000
3001	return (ret || xdp_ring_err) ? -ENOMEM : 0;
3002}
3003
3004/**
3005 * ice_xdp_safe_mode - XDP handler for safe mode
3006 * @dev: netdevice
3007 * @xdp: XDP command
3008 */
3009static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3010			     struct netdev_bpf *xdp)
3011{
3012	NL_SET_ERR_MSG_MOD(xdp->extack,
3013			   "Please provide working DDP firmware package in order to use XDP\n"
3014			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3015	return -EOPNOTSUPP;
3016}
3017
3018/**
3019 * ice_xdp - implements XDP handler
3020 * @dev: netdevice
3021 * @xdp: XDP command
3022 */
3023static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3024{
3025	struct ice_netdev_priv *np = netdev_priv(dev);
3026	struct ice_vsi *vsi = np->vsi;
3027
3028	if (vsi->type != ICE_VSI_PF) {
3029		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3030		return -EINVAL;
3031	}
3032
3033	switch (xdp->command) {
3034	case XDP_SETUP_PROG:
3035		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3036	case XDP_SETUP_XSK_POOL:
3037		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3038					  xdp->xsk.queue_id);
3039	default:
3040		return -EINVAL;
3041	}
3042}
3043
3044/**
3045 * ice_ena_misc_vector - enable the non-queue interrupts
3046 * @pf: board private structure
3047 */
3048static void ice_ena_misc_vector(struct ice_pf *pf)
3049{
3050	struct ice_hw *hw = &pf->hw;
3051	u32 pf_intr_start_offset;
3052	u32 val;
3053
3054	/* Disable anti-spoof detection interrupt to prevent spurious event
3055	 * interrupts during a function reset. Anti-spoof functionality is
3056	 * still supported.
3057	 */
3058	val = rd32(hw, GL_MDCK_TX_TDPU);
3059	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3060	wr32(hw, GL_MDCK_TX_TDPU, val);
3061
3062	/* clear things first */
3063	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3064	rd32(hw, PFINT_OICR);		/* read to clear */
3065
3066	val = (PFINT_OICR_ECC_ERR_M |
3067	       PFINT_OICR_MAL_DETECT_M |
3068	       PFINT_OICR_GRST_M |
3069	       PFINT_OICR_PCI_EXCEPTION_M |
3070	       PFINT_OICR_VFLR_M |
3071	       PFINT_OICR_HMC_ERR_M |
3072	       PFINT_OICR_PE_PUSH_M |
3073	       PFINT_OICR_PE_CRITERR_M);
3074
3075	wr32(hw, PFINT_OICR_ENA, val);
3076
3077	/* SW_ITR_IDX = 0, but don't change INTENA */
3078	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3079	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3080
3081	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3082		return;
3083	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3084	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3085	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3086}
3087
3088/**
3089 * ice_ll_ts_intr - ll_ts interrupt handler
3090 * @irq: interrupt number
3091 * @data: pointer to the PF structure
3092 */
3093static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3094{
3095	struct ice_pf *pf = data;
3096	u32 pf_intr_start_offset;
3097	struct ice_ptp_tx *tx;
3098	unsigned long flags;
3099	struct ice_hw *hw;
3100	u32 val;
3101	u8 idx;
3102
3103	hw = &pf->hw;
3104	tx = &pf->ptp.port.tx;
3105	spin_lock_irqsave(&tx->lock, flags);
3106	ice_ptp_complete_tx_single_tstamp(tx);
3107
3108	idx = find_next_bit_wrap(tx->in_use, tx->len,
3109				 tx->last_ll_ts_idx_read + 1);
3110	if (idx != tx->len)
3111		ice_ptp_req_tx_single_tstamp(tx, idx);
3112	spin_unlock_irqrestore(&tx->lock, flags);
3113
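	/* re-arm the vector: re-enable the interrupt and clear its pending bit */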
3114	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3115	      (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3116	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3117	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3118	     val);
3119
3120	return IRQ_HANDLED;
3121}
3122
3123/**
3124 * ice_misc_intr - misc interrupt handler
3125 * @irq: interrupt number
3126 * @data: pointer to the PF structure
3127 */
3128static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3129{
3130	struct ice_pf *pf = (struct ice_pf *)data;
3131	irqreturn_t ret = IRQ_HANDLED;
3132	struct ice_hw *hw = &pf->hw;
3133	struct device *dev;
3134	u32 oicr, ena_mask;
3135
3136	dev = ice_pf_to_dev(pf);
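	/* claim all control queue events up front; the service task will
	 * clean whichever queues actually have pending work
	 */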
3137	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3138	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3139	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3140
3141	oicr = rd32(hw, PFINT_OICR);
3142	ena_mask = rd32(hw, PFINT_OICR_ENA);
3143
3144	if (oicr & PFINT_OICR_SWINT_M) {
3145		ena_mask &= ~PFINT_OICR_SWINT_M;
3146		pf->sw_int_count++;
3147	}
3148
3149	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3150		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3151		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3152	}
3153	if (oicr & PFINT_OICR_VFLR_M) {
3154		/* disable any further VFLR event notifications */
3155		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3156			u32 reg = rd32(hw, PFINT_OICR_ENA);
3157
3158			reg &= ~PFINT_OICR_VFLR_M;
3159			wr32(hw, PFINT_OICR_ENA, reg);
3160		} else {
3161			ena_mask &= ~PFINT_OICR_VFLR_M;
3162			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3163		}
3164	}
3165
3166	if (oicr & PFINT_OICR_GRST_M) {
3167		u32 reset;
3168
3169		/* we have a reset warning */
3170		ena_mask &= ~PFINT_OICR_GRST_M;
3171		reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3172				  rd32(hw, GLGEN_RSTAT));
3173
3174		if (reset == ICE_RESET_CORER)
3175			pf->corer_count++;
3176		else if (reset == ICE_RESET_GLOBR)
3177			pf->globr_count++;
3178		else if (reset == ICE_RESET_EMPR)
3179			pf->empr_count++;
3180		else
3181			dev_dbg(dev, "Invalid reset type %d\n", reset);
3182
3183		/* If a reset cycle isn't already in progress, we set a bit in
3184		 * pf->state so that the service task can start a reset/rebuild.
3185		 */
3186		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3187			if (reset == ICE_RESET_CORER)
3188				set_bit(ICE_CORER_RECV, pf->state);
3189			else if (reset == ICE_RESET_GLOBR)
3190				set_bit(ICE_GLOBR_RECV, pf->state);
3191			else
3192				set_bit(ICE_EMPR_RECV, pf->state);
3193
3194			/* There are a couple of different bits at play here.
3195			 * hw->reset_ongoing indicates whether the hardware is
3196			 * in reset. This is set to true when a reset interrupt
3197			 * is received and set back to false after the driver
3198			 * has determined that the hardware is out of reset.
3199			 *
3200			 * ICE_RESET_OICR_RECV in pf->state indicates
3201			 * that a post reset rebuild is required before the
3202			 * driver is operational again. This is set above.
3203			 *
3204			 * As this is the start of the reset/rebuild cycle, set
3205			 * both to indicate that.
3206			 */
3207			hw->reset_ongoing = true;
3208		}
3209	}
3210
3211	if (oicr & PFINT_OICR_TSYN_TX_M) {
3212		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3213		if (ice_pf_state_is_nominal(pf) &&
3214		    pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3215			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3216			unsigned long flags;
3217			u8 idx;
3218
3219			spin_lock_irqsave(&tx->lock, flags);
3220			idx = find_next_bit_wrap(tx->in_use, tx->len,
3221						 tx->last_ll_ts_idx_read + 1);
3222			if (idx != tx->len)
3223				ice_ptp_req_tx_single_tstamp(tx, idx);
3224			spin_unlock_irqrestore(&tx->lock, flags);
3225		} else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3226			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3227			ret = IRQ_WAKE_THREAD;
3228		}
3229	}
3230
3231	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3232		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3233		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3234
3235		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3236
3237		if (ice_pf_src_tmr_owned(pf)) {
3238			/* Save EVENTs from GLTSYN register */
3239			pf->ptp.ext_ts_irq |= gltsyn_stat &
3240					      (GLTSYN_STAT_EVENT0_M |
3241					       GLTSYN_STAT_EVENT1_M |
3242					       GLTSYN_STAT_EVENT2_M);
3243
3244			ice_ptp_extts_event(pf);
3245		}
3246	}
3247
3248#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3249	if (oicr & ICE_AUX_CRIT_ERR) {
3250		pf->oicr_err_reg |= oicr;
3251		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3252		ena_mask &= ~ICE_AUX_CRIT_ERR;
3253	}
3254
3255	/* Report any remaining unexpected interrupts */
3256	oicr &= ena_mask;
3257	if (oicr) {
3258		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3259		/* If a critical error is pending there is no choice but to
3260		 * reset the device.
3261		 */
3262		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3263			    PFINT_OICR_ECC_ERR_M)) {
3264			set_bit(ICE_PFR_REQ, pf->state);
3265		}
3266	}
3267	ice_service_task_schedule(pf);
3268	if (ret == IRQ_HANDLED)
3269		ice_irq_dynamic_ena(hw, NULL, NULL);
3270
3271	return ret;
3272}
3273
3274/**
3275 * ice_misc_intr_thread_fn - misc interrupt thread function
3276 * @irq: interrupt number
3277 * @data: pointer to the PF structure
3278 */
3279static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3280{
3281	struct ice_pf *pf = data;
3282	struct ice_hw *hw;
3283
3284	hw = &pf->hw;
3285
3286	if (ice_is_reset_in_progress(pf->state))
3287		goto skip_irq;
3288
3289	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3290		/* Process outstanding Tx timestamps. If there is more work,
3291		 * re-arm the interrupt to trigger again.
3292		 */
3293		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3294			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3295			ice_flush(hw);
3296		}
3297	}
3298
3299skip_irq:
3300	ice_irq_dynamic_ena(hw, NULL, NULL);
3301
3302	return IRQ_HANDLED;
3303}
3304
3305/**
3306 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3307 * @hw: pointer to HW structure
3308 */
3309static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3310{
3311	/* disable Admin queue Interrupt causes */
3312	wr32(hw, PFINT_FW_CTL,
3313	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3314
3315	/* disable Mailbox queue Interrupt causes */
3316	wr32(hw, PFINT_MBX_CTL,
3317	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3318
3319	wr32(hw, PFINT_SB_CTL,
3320	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3321
3322	/* disable Control queue Interrupt causes */
3323	wr32(hw, PFINT_OICR_CTL,
3324	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3325
3326	ice_flush(hw);
3327}
3328
3329/**
3330 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3331 * @pf: board private structure
3332 */
3333static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3334{
3335	int irq_num = pf->ll_ts_irq.virq;
3336
3337	synchronize_irq(irq_num);
3338	devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3339
3340	ice_free_irq(pf, pf->ll_ts_irq);
3341}
3342
3343/**
3344 * ice_free_irq_msix_misc - Unroll misc vector setup
3345 * @pf: board private structure
3346 */
3347static void ice_free_irq_msix_misc(struct ice_pf *pf)
3348{
3349	int misc_irq_num = pf->oicr_irq.virq;
3350	struct ice_hw *hw = &pf->hw;
3351
3352	ice_dis_ctrlq_interrupts(hw);
3353
3354	/* disable OICR interrupt */
3355	wr32(hw, PFINT_OICR_ENA, 0);
3356	ice_flush(hw);
3357
3358	synchronize_irq(misc_irq_num);
3359	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3360
3361	ice_free_irq(pf, pf->oicr_irq);
3362	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3363		ice_free_irq_msix_ll_ts(pf);
3364}
3365
3366/**
3367 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3368 * @hw: pointer to HW structure
3369 * @reg_idx: HW vector index to associate the control queue interrupts with
3370 */
3371static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3372{
3373	u32 val;
3374
3375	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3376	       PFINT_OICR_CTL_CAUSE_ENA_M);
3377	wr32(hw, PFINT_OICR_CTL, val);
3378
3379	/* enable Admin queue Interrupt causes */
3380	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3381	       PFINT_FW_CTL_CAUSE_ENA_M);
3382	wr32(hw, PFINT_FW_CTL, val);
3383
3384	/* enable Mailbox queue Interrupt causes */
3385	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3386	       PFINT_MBX_CTL_CAUSE_ENA_M);
3387	wr32(hw, PFINT_MBX_CTL, val);
3388
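	/* when the low-latency timestamp interrupt is in use, PFINT_SB_CTL is
	 * instead programmed to the dedicated ll_ts vector by
	 * ice_req_irq_msix_misc()
	 */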
3389	if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3390		/* enable Sideband queue Interrupt causes */
3391		val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3392		       PFINT_SB_CTL_CAUSE_ENA_M);
3393		wr32(hw, PFINT_SB_CTL, val);
3394	}
3395
3396	ice_flush(hw);
3397}
3398
3399/**
3400 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3401 * @pf: board private structure
3402 *
3403 * This sets up the handler for MSIX 0, which is used to manage the
3404 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3405 * when in MSI or Legacy interrupt mode.
3406 */
3407static int ice_req_irq_msix_misc(struct ice_pf *pf)
3408{
3409	struct device *dev = ice_pf_to_dev(pf);
3410	struct ice_hw *hw = &pf->hw;
3411	u32 pf_intr_start_offset;
3412	struct msi_map irq;
3413	int err = 0;
3414
3415	if (!pf->int_name[0])
3416		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3417			 dev_driver_string(dev), dev_name(dev));
3418
3419	if (!pf->int_name_ll_ts[0])
3420		snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3421			 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3422	/* Do not request IRQ but do enable OICR interrupt since settings are
3423	 * lost during reset. Note that this function is called only during
3424	 * rebuild path and not while reset is in progress.
3425	 */
3426	if (ice_is_reset_in_progress(pf->state))
3427		goto skip_req_irq;
3428
3429	/* reserve one vector in irq_tracker for misc interrupts */
3430	irq = ice_alloc_irq(pf, false);
3431	if (irq.index < 0)
3432		return irq.index;
3433
3434	pf->oicr_irq = irq;
3435	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3436					ice_misc_intr_thread_fn, 0,
3437					pf->int_name, pf);
3438	if (err) {
3439		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3440			pf->int_name, err);
3441		ice_free_irq(pf, pf->oicr_irq);
3442		return err;
3443	}
3444
3445	/* reserve one vector in irq_tracker for ll_ts interrupt */
3446	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3447		goto skip_req_irq;
3448
3449	irq = ice_alloc_irq(pf, false);
3450	if (irq.index < 0)
3451		return irq.index;
3452
3453	pf->ll_ts_irq = irq;
3454	err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3455			       pf->int_name_ll_ts, pf);
3456	if (err) {
3457		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3458			pf->int_name_ll_ts, err);
3459		ice_free_irq(pf, pf->ll_ts_irq);
3460		return err;
3461	}
3462
3463skip_req_irq:
3464	ice_ena_misc_vector(pf);
3465
3466	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3467	/* This enables LL TS interrupt */
3468	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3469	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3470		wr32(hw, PFINT_SB_CTL,
3471		     ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3472		      PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3473	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3474	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3475
3476	ice_flush(hw);
3477	ice_irq_dynamic_ena(hw, NULL, NULL);
3478
3479	return 0;
3480}
3481
3482/**
3483 * ice_napi_add - register NAPI handler for the VSI
3484 * @vsi: VSI for which NAPI handler is to be registered
3485 *
3486 * This function is only called in the driver's load path. Registering the NAPI
3487 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3488 * reset/rebuild, etc.)
3489 */
3490static void ice_napi_add(struct ice_vsi *vsi)
3491{
3492	int v_idx;
3493
3494	if (!vsi->netdev)
3495		return;
3496
3497	ice_for_each_q_vector(vsi, v_idx) {
3498		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3499			       ice_napi_poll);
3500		__ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
3501	}
3502}
3503
3504/**
3505 * ice_set_ops - set netdev and ethtool ops for the given netdev
3506 * @vsi: the VSI associated with the new netdev
3507 */
3508static void ice_set_ops(struct ice_vsi *vsi)
3509{
3510	struct net_device *netdev = vsi->netdev;
3511	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3512
3513	if (ice_is_safe_mode(pf)) {
3514		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3515		ice_set_ethtool_safe_mode_ops(netdev);
3516		return;
3517	}
3518
3519	netdev->netdev_ops = &ice_netdev_ops;
3520	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3521	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3522	ice_set_ethtool_ops(netdev);
3523
3524	if (vsi->type != ICE_VSI_PF)
3525		return;
3526
3527	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3528			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3529			       NETDEV_XDP_ACT_RX_SG;
3530	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3531}
3532
3533/**
3534 * ice_set_netdev_features - set features for the given netdev
3535 * @netdev: netdev instance
3536 */
3537static void ice_set_netdev_features(struct net_device *netdev)
3538{
3539	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3540	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3541	netdev_features_t csumo_features;
3542	netdev_features_t vlano_features;
3543	netdev_features_t dflt_features;
3544	netdev_features_t tso_features;
3545
3546	if (ice_is_safe_mode(pf)) {
3547		/* safe mode */
3548		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3549		netdev->hw_features = netdev->features;
3550		return;
3551	}
3552
3553	dflt_features = NETIF_F_SG	|
3554			NETIF_F_HIGHDMA	|
3555			NETIF_F_NTUPLE	|
3556			NETIF_F_RXHASH;
3557
3558	csumo_features = NETIF_F_RXCSUM	  |
3559			 NETIF_F_IP_CSUM  |
3560			 NETIF_F_SCTP_CRC |
3561			 NETIF_F_IPV6_CSUM;
3562
3563	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3564			 NETIF_F_HW_VLAN_CTAG_TX     |
3565			 NETIF_F_HW_VLAN_CTAG_RX;
3566
3567	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3568	if (is_dvm_ena)
3569		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3570
3571	tso_features = NETIF_F_TSO			|
3572		       NETIF_F_TSO_ECN			|
3573		       NETIF_F_TSO6			|
3574		       NETIF_F_GSO_GRE			|
3575		       NETIF_F_GSO_UDP_TUNNEL		|
3576		       NETIF_F_GSO_GRE_CSUM		|
3577		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3578		       NETIF_F_GSO_PARTIAL		|
3579		       NETIF_F_GSO_IPXIP4		|
3580		       NETIF_F_GSO_IPXIP6		|
3581		       NETIF_F_GSO_UDP_L4;
3582
3583	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3584					NETIF_F_GSO_GRE_CSUM;
3585	/* set features that user can change */
3586	netdev->hw_features = dflt_features | csumo_features |
3587			      vlano_features | tso_features;
3588
3589	/* add support for HW_CSUM on packets with MPLS header */
3590	netdev->mpls_features =  NETIF_F_HW_CSUM |
3591				 NETIF_F_TSO     |
3592				 NETIF_F_TSO6;
3593
3594	/* enable features */
3595	netdev->features |= netdev->hw_features;
3596
3597	netdev->hw_features |= NETIF_F_HW_TC;
3598	netdev->hw_features |= NETIF_F_LOOPBACK;
3599
3600	/* encap and VLAN devices inherit default, csumo and tso features */
3601	netdev->hw_enc_features |= dflt_features | csumo_features |
3602				   tso_features;
3603	netdev->vlan_features |= dflt_features | csumo_features |
3604				 tso_features;
3605
3606	/* advertise support but don't enable by default since only one type of
3607	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3608	 * type is turned on, the other has to be turned off. This is enforced by the
3609	 * ice_fix_features() ndo callback.
3610	 */
3611	if (is_dvm_ena)
3612		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3613			NETIF_F_HW_VLAN_STAG_TX;
3614
3615	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3616	 * be changed at runtime
3617	 */
3618	netdev->hw_features |= NETIF_F_RXFCS;
3619
3620	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3621}
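
/* Editor's note (illustrative): the bits placed in netdev->hw_features above
 * are the ones user space may toggle at runtime, e.g.:
 *
 *	ethtool -K eth0 rx-fcs on         # NETIF_F_RXFCS, keep CRC/FCS
 *	ethtool -K eth0 hw-tc-offload on  # NETIF_F_HW_TC, enable TC offload
 *
 * NETIF_F_HW_TC, NETIF_F_LOOPBACK, NETIF_F_RXFCS and the STAG offloads are
 * added to hw_features only after the netdev->features assignment, so they
 * are advertised but left disabled by default.
 */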
3622
3623/**
3624 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3625 * @lut: Lookup table
3626 * @rss_table_size: Lookup table size
3627 * @rss_size: Range of queue number for hashing
3628 */
3629void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3630{
3631	u16 i;
3632
3633	for (i = 0; i < rss_table_size; i++)
3634		lut[i] = i % rss_size;
3635}
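
/* Editor's sketch (hypothetical helper, not part of the driver): shows the
 * round-robin pattern ice_fill_rss_lut() produces. For an 8-entry table
 * spread across 3 queues, the LUT becomes { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e.
 * RSS hash buckets are distributed evenly over the active queues.
 */
static void __maybe_unused ice_example_fill_rss_lut(void)
{
	u8 lut[8];

	ice_fill_rss_lut(lut, 8, 3);
	/* lut[] now holds { 0, 1, 2, 0, 1, 2, 0, 1 } */
}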
3636
3637/**
3638 * ice_pf_vsi_setup - Set up a PF VSI
3639 * @pf: board private structure
3640 * @pi: pointer to the port_info instance
3641 *
3642 * Returns pointer to the successfully allocated VSI software struct
3643 * on success, otherwise returns NULL on failure.
3644 */
3645static struct ice_vsi *
3646ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3647{
3648	struct ice_vsi_cfg_params params = {};
3649
3650	params.type = ICE_VSI_PF;
3651	params.pi = pi;
3652	params.flags = ICE_VSI_FLAG_INIT;
3653
3654	return ice_vsi_setup(pf, &params);
3655}
3656
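/**
 * ice_chnl_vsi_setup - Set up a channel VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @ch: pointer to the channel structure for the VSI
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */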
3657static struct ice_vsi *
3658ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3659		   struct ice_channel *ch)
3660{
3661	struct ice_vsi_cfg_params params = {};
3662
3663	params.type = ICE_VSI_CHNL;
3664	params.pi = pi;
3665	params.ch = ch;
3666	params.flags = ICE_VSI_FLAG_INIT;
3667
3668	return ice_vsi_setup(pf, &params);
3669}
3670
3671/**
3672 * ice_ctrl_vsi_setup - Set up a control VSI
3673 * @pf: board private structure
3674 * @pi: pointer to the port_info instance
3675 *
3676 * Returns pointer to the successfully allocated VSI software struct
3677 * on success, otherwise returns NULL on failure.
3678 */
3679static struct ice_vsi *
3680ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3681{
3682	struct ice_vsi_cfg_params params = {};
3683
3684	params.type = ICE_VSI_CTRL;
3685	params.pi = pi;
3686	params.flags = ICE_VSI_FLAG_INIT;
3687
3688	return ice_vsi_setup(pf, &params);
3689}
3690
3691/**
3692 * ice_lb_vsi_setup - Set up a loopback VSI
3693 * @pf: board private structure
3694 * @pi: pointer to the port_info instance
3695 *
3696 * Returns pointer to the successfully allocated VSI software struct
3697 * on success, otherwise returns NULL on failure.
3698 */
3699struct ice_vsi *
3700ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3701{
3702	struct ice_vsi_cfg_params params = {};
3703
3704	params.type = ICE_VSI_LB;
3705	params.pi = pi;
3706	params.flags = ICE_VSI_FLAG_INIT;
3707
3708	return ice_vsi_setup(pf, &params);
3709}
3710
3711/**
3712 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3713 * @netdev: network interface to be adjusted
3714 * @proto: VLAN TPID
3715 * @vid: VLAN ID to be added
3716 *
3717 * net_device_ops implementation for adding VLAN IDs
3718 */
3719static int
3720ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3721{
3722	struct ice_netdev_priv *np = netdev_priv(netdev);
3723	struct ice_vsi_vlan_ops *vlan_ops;
3724	struct ice_vsi *vsi = np->vsi;
3725	struct ice_vlan vlan;
3726	int ret;
3727
3728	/* VLAN 0 is added by default during load/reset */
3729	if (!vid)
3730		return 0;
3731
3732	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3733		usleep_range(1000, 2000);
3734
3735	/* Add multicast promisc rule for the VLAN ID to be added if
3736	 * all-multicast is currently enabled.
3737	 */
3738	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3739		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3740					       ICE_MCAST_VLAN_PROMISC_BITS,
3741					       vid);
3742		if (ret)
3743			goto finish;
3744	}
3745
3746	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3747
3748	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3749	 * packets aren't pruned by the device's internal switch on Rx
3750	 */
3751	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3752	ret = vlan_ops->add_vlan(vsi, &vlan);
3753	if (ret)
3754		goto finish;
3755
3756	/* If all-multicast is currently enabled and this VLAN ID is the only one
3757	 * besides VLAN-0, we have to update the look-up type of multicast promisc
3758	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3759	 */
3760	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3761	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3762		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3763					   ICE_MCAST_PROMISC_BITS, 0);
3764		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3765					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3766	}
3767
3768finish:
3769	clear_bit(ICE_CFG_BUSY, vsi->state);
3770
3771	return ret;
3772}
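
/* Editor's note (illustrative): this ndo runs when a VLAN device is stacked
 * on the interface, e.g.:
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *
 * which reaches ice_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), 100) through
 * the 8021q layer.
 */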
3773
3774/**
3775 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3776 * @netdev: network interface to be adjusted
3777 * @proto: VLAN TPID
3778 * @vid: VLAN ID to be removed
3779 *
3780 * net_device_ops implementation for removing VLAN IDs
3781 */
3782static int
3783ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3784{
3785	struct ice_netdev_priv *np = netdev_priv(netdev);
3786	struct ice_vsi_vlan_ops *vlan_ops;
3787	struct ice_vsi *vsi = np->vsi;
3788	struct ice_vlan vlan;
3789	int ret;
3790
3791	/* don't allow removal of VLAN 0 */
3792	if (!vid)
3793		return 0;
3794
3795	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3796		usleep_range(1000, 2000);
3797
3798	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3799				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3800	if (ret) {
3801		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3802			   vsi->vsi_num);
3803		vsi->current_netdev_flags |= IFF_ALLMULTI;
3804	}
3805
3806	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3807
3808	/* Make sure VLAN delete is successful before updating VLAN
3809	 * information
3810	 */
3811	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3812	ret = vlan_ops->del_vlan(vsi, &vlan);
3813	if (ret)
3814		goto finish;
3815
3816	/* Remove multicast promisc rule for the removed VLAN ID if
3817	 * all-multicast is enabled.
3818	 */
3819	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3820		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3821					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3822
3823	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3824		/* Update look-up type of multicast promisc rule for VLAN 0
3825		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3826		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3827		 */
3828		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3829			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3830						   ICE_MCAST_VLAN_PROMISC_BITS,
3831						   0);
3832			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3833						 ICE_MCAST_PROMISC_BITS, 0);
3834		}
3835	}
3836
3837finish:
3838	clear_bit(ICE_CFG_BUSY, vsi->state);
3839
3840	return ret;
3841}
3842
3843/**
3844 * ice_rep_indr_tc_block_unbind - unbind an indirect block and free its data
3845 * @cb_priv: indirection block private data
3846 */
3847static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3848{
3849	struct ice_indr_block_priv *indr_priv = cb_priv;
3850
3851	list_del(&indr_priv->list);
3852	kfree(indr_priv);
3853}
3854
3855/**
3856 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3857 * @vsi: VSI struct which has the netdev
3858 */
3859static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3860{
3861	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3862
3863	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3864				 ice_rep_indr_tc_block_unbind);
3865}
3866
3867/**
3868 * ice_tc_indir_block_register - Register TC indirect block notifications
3869 * @vsi: VSI struct which has the netdev
3870 *
3871 * Returns 0 on success, negative value on failure
3872 */
3873static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3874{
3875	struct ice_netdev_priv *np;
3876
3877	if (!vsi || !vsi->netdev)
3878		return -EINVAL;
3879
3880	np = netdev_priv(vsi->netdev);
3881
3882	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3883	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3884}
3885
3886/**
3887 * ice_get_avail_q_count - Get count of available (unused) queues
3888 * @pf_qmap: bitmap to count available queues from
3889 * @lock: pointer to a mutex that protects access to pf_qmap
3890 * @size: size of the bitmap
3891 */
3892static u16
3893ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3894{
3895	unsigned long bit;
3896	u16 count = 0;
3897
3898	mutex_lock(lock);
3899	for_each_clear_bit(bit, pf_qmap, size)
3900		count++;
3901	mutex_unlock(lock);
3902
3903	return count;
3904}
3905
3906/**
3907 * ice_get_avail_txq_count - Get count of available Tx queues
3908 * @pf: pointer to an ice_pf instance
3909 */
3910u16 ice_get_avail_txq_count(struct ice_pf *pf)
3911{
3912	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3913				     pf->max_pf_txqs);
3914}
3915
3916/**
3917 * ice_get_avail_rxq_count - Get count of available Rx queues
3918 * @pf: pointer to an ice_pf instance
3919 */
3920u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3921{
3922	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3923				     pf->max_pf_rxqs);
3924}
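
/* Editor's sketch (hypothetical helper, illustration only): counting clear
 * bits one by one, as ice_get_avail_q_count() does above, is equivalent to
 * subtracting the bitmap weight from the total; locking is omitted here for
 * brevity and <linux/bitmap.h> is assumed.
 */
static u16 __maybe_unused ice_example_avail_q_count(unsigned long *pf_qmap, u16 size)
{
	/* available = total - bits set (i.e. queues in use) */
	return size - (u16)bitmap_weight(pf_qmap, size);
}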
3925
3926/**
3927 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3928 * @pf: board private structure to de-initialize
3929 */
3930static void ice_deinit_pf(struct ice_pf *pf)
3931{
3932	ice_service_task_stop(pf);
3933	mutex_destroy(&pf->lag_mutex);
3934	mutex_destroy(&pf->adev_mutex);
3935	mutex_destroy(&pf->sw_mutex);
3936	mutex_destroy(&pf->tc_mutex);
3937	mutex_destroy(&pf->avail_q_mutex);
3938	mutex_destroy(&pf->vfs.table_lock);
3939
3940	if (pf->avail_txqs) {
3941		bitmap_free(pf->avail_txqs);
3942		pf->avail_txqs = NULL;
3943	}
3944
3945	if (pf->avail_rxqs) {
3946		bitmap_free(pf->avail_rxqs);
3947		pf->avail_rxqs = NULL;
3948	}
3949
3950	if (pf->ptp.clock)
3951		ptp_clock_unregister(pf->ptp.clock);
3952}
3953
3954/**
3955 * ice_set_pf_caps - set PF's capability flags
3956 * @pf: pointer to the PF instance
3957 */
3958static void ice_set_pf_caps(struct ice_pf *pf)
3959{
3960	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3961
3962	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3963	if (func_caps->common_cap.rdma)
3964		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3965	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3966	if (func_caps->common_cap.dcb)
3967		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3968	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3969	if (func_caps->common_cap.sr_iov_1_1) {
3970		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3971		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3972					      ICE_MAX_SRIOV_VFS);
3973	}
3974	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3975	if (func_caps->common_cap.rss_table_size)
3976		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3977
3978	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3979	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3980		u16 unused;
3981
3982		/* ctrl_vsi_idx will be set to a valid value when flow director
3983		 * is set up by ice_init_fdir
3984		 */
3985		pf->ctrl_vsi_idx = ICE_NO_VSI;
3986		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3987		/* force guaranteed filter pool for PF */
3988		ice_alloc_fd_guar_item(&pf->hw, &unused,
3989				       func_caps->fd_fltr_guar);
3990		/* force shared filter pool for PF */
3991		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3992				       func_caps->fd_fltr_best_effort);
3993	}
3994
3995	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3996	if (func_caps->common_cap.ieee_1588 &&
3997	    pf->hw.mac_type != ICE_MAC_E830)
3998		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3999
4000	pf->max_pf_txqs = func_caps->common_cap.num_txq;
4001	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4002}
4003
4004/**
4005 * ice_init_pf - Initialize general software structures (struct ice_pf)
4006 * @pf: board private structure to initialize
4007 */
4008static int ice_init_pf(struct ice_pf *pf)
4009{
4010	ice_set_pf_caps(pf);
4011
4012	mutex_init(&pf->sw_mutex);
4013	mutex_init(&pf->tc_mutex);
4014	mutex_init(&pf->adev_mutex);
4015	mutex_init(&pf->lag_mutex);
4016
4017	INIT_HLIST_HEAD(&pf->aq_wait_list);
4018	spin_lock_init(&pf->aq_wait_lock);
4019	init_waitqueue_head(&pf->aq_wait_queue);
4020
4021	init_waitqueue_head(&pf->reset_wait_queue);
4022
4023	/* setup service timer and periodic service task */
4024	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4025	pf->serv_tmr_period = HZ;
4026	INIT_WORK(&pf->serv_task, ice_service_task);
4027	clear_bit(ICE_SERVICE_SCHED, pf->state);
4028
4029	mutex_init(&pf->avail_q_mutex);
4030	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4031	if (!pf->avail_txqs)
4032		return -ENOMEM;
4033
4034	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4035	if (!pf->avail_rxqs) {
4036		bitmap_free(pf->avail_txqs);
4037		pf->avail_txqs = NULL;
4038		return -ENOMEM;
4039	}
4040
4041	mutex_init(&pf->vfs.table_lock);
4042	hash_init(pf->vfs.table);
4043	ice_mbx_init_snapshot(&pf->hw);
4044
4045	return 0;
4046}
4047
4048/**
4049 * ice_is_wol_supported - check if WoL is supported
4050 * @hw: pointer to hardware info
4051 *
4052 * Check if WoL is supported based on the HW configuration.
4053 * Returns true if NVM supports and enables WoL for this port, false otherwise
4054 */
4055bool ice_is_wol_supported(struct ice_hw *hw)
4056{
4057	u16 wol_ctrl;
4058
4059	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4060	 * word) indicates WoL is not supported on the corresponding PF ID.
4061	 */
4062	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4063		return false;
4064
4065	return !(BIT(hw->port_info->lport) & wol_ctrl);
4066}
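
/* Editor's worked example: if the NVM WoL control word reads 0x0004, bit 2
 * is set, so WoL is not supported on the port with lport == 2, while the
 * other ports (clear bits) do support it.
 */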
4067
4068/**
4069 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4070 * @vsi: VSI being changed
4071 * @new_rx: new number of Rx queues
4072 * @new_tx: new number of Tx queues
4073 * @locked: is adev device_lock held
4074 *
4075 * Only change the number of queues if new_tx or new_rx is non-zero.
4076 *
4077 * Returns 0 on success.
4078 */
4079int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4080{
4081	struct ice_pf *pf = vsi->back;
4082	int err = 0, timeout = 50;
4083
4084	if (!new_rx && !new_tx)
4085		return -EINVAL;
4086
4087	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4088		timeout--;
4089		if (!timeout)
4090			return -EBUSY;
4091		usleep_range(1000, 2000);
4092	}
4093
4094	if (new_tx)
4095		vsi->req_txq = (u16)new_tx;
4096	if (new_rx)
4097		vsi->req_rxq = (u16)new_rx;
4098
4099	/* set for the next time the netdev is started */
4100	if (!netif_running(vsi->netdev)) {
4101		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4102		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4103		goto done;
4104	}
4105
4106	ice_vsi_close(vsi);
4107	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4108	ice_pf_dcb_recfg(pf, locked);
4109	ice_vsi_open(vsi);
4110done:
4111	clear_bit(ICE_CFG_BUSY, pf->state);
4112	return err;
4113}
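
/* Editor's note (illustrative): this path is typically reached from the
 * ethtool channels op, e.g.:
 *
 *	ethtool -L eth0 combined 8
 *
 * When the netdev is not running, only req_txq/req_rxq are applied via the
 * rebuild and the new queue counts take effect on the next open.
 */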
4114
4115/**
4116 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4117 * @pf: PF to configure
4118 *
4119 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4120 * VSI can still Tx/Rx VLAN tagged packets.
4121 */
4122static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4123{
4124	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4125	struct ice_vsi_ctx *ctxt;
4126	struct ice_hw *hw;
4127	int status;
4128
4129	if (!vsi)
4130		return;
4131
4132	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4133	if (!ctxt)
4134		return;
4135
4136	hw = &pf->hw;
4137	ctxt->info = vsi->info;
4138
4139	ctxt->info.valid_sections =
4140		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4141			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4142			    ICE_AQ_VSI_PROP_SW_VALID);
4143
4144	/* disable VLAN anti-spoof */
4145	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4146				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4147
4148	/* disable VLAN pruning and keep all other settings */
4149	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4150
4151	/* allow all VLANs on Tx and don't strip on Rx */
4152	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4153		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4154
4155	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4156	if (status) {
4157		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4158			status, ice_aq_str(hw->adminq.sq_last_status));
4159	} else {
4160		vsi->info.sec_flags = ctxt->info.sec_flags;
4161		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4162		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4163	}
4164
4165	kfree(ctxt);
4166}
4167
4168/**
4169 * ice_log_pkg_init - log result of DDP package load
4170 * @hw: pointer to hardware info
4171 * @state: state of package load
4172 */
4173static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4174{
4175	struct ice_pf *pf = hw->back;
4176	struct device *dev;
4177
4178	dev = ice_pf_to_dev(pf);
4179
4180	switch (state) {
4181	case ICE_DDP_PKG_SUCCESS:
4182		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4183			 hw->active_pkg_name,
4184			 hw->active_pkg_ver.major,
4185			 hw->active_pkg_ver.minor,
4186			 hw->active_pkg_ver.update,
4187			 hw->active_pkg_ver.draft);
4188		break;
4189	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4190		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4191			 hw->active_pkg_name,
4192			 hw->active_pkg_ver.major,
4193			 hw->active_pkg_ver.minor,
4194			 hw->active_pkg_ver.update,
4195			 hw->active_pkg_ver.draft);
4196		break;
4197	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4198		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4199			hw->active_pkg_name,
4200			hw->active_pkg_ver.major,
4201			hw->active_pkg_ver.minor,
4202			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4203		break;
4204	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4205		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4206			 hw->active_pkg_name,
4207			 hw->active_pkg_ver.major,
4208			 hw->active_pkg_ver.minor,
4209			 hw->active_pkg_ver.update,
4210			 hw->active_pkg_ver.draft,
4211			 hw->pkg_name,
4212			 hw->pkg_ver.major,
4213			 hw->pkg_ver.minor,
4214			 hw->pkg_ver.update,
4215			 hw->pkg_ver.draft);
4216		break;
4217	case ICE_DDP_PKG_FW_MISMATCH:
4218		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4219		break;
4220	case ICE_DDP_PKG_INVALID_FILE:
4221		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4222		break;
4223	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4224		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4225		break;
4226	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4227		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4228			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4229		break;
4230	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4231		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4232		break;
4233	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4234		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4235		break;
4236	case ICE_DDP_PKG_LOAD_ERROR:
4237		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4238		/* poll for reset to complete */
4239		if (ice_check_reset(hw))
4240			dev_err(dev, "Error resetting device. Please reload the driver\n");
4241		break;
4242	case ICE_DDP_PKG_ERR:
4243	default:
4244		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4245		break;
4246	}
4247}
4248
4249/**
4250 * ice_load_pkg - load/reload the DDP Package file
4251 * @firmware: firmware structure when firmware requested or NULL for reload
4252 * @pf: pointer to the PF instance
4253 *
4254 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4255 * initialize HW tables.
4256 */
4257static void
4258ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4259{
4260	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4261	struct device *dev = ice_pf_to_dev(pf);
4262	struct ice_hw *hw = &pf->hw;
4263
4264	/* Load DDP Package */
4265	if (firmware && !hw->pkg_copy) {
4266		state = ice_copy_and_init_pkg(hw, firmware->data,
4267					      firmware->size);
4268		ice_log_pkg_init(hw, state);
4269	} else if (!firmware && hw->pkg_copy) {
4270		/* Reload package during rebuild after CORER/GLOBR reset */
4271		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4272		ice_log_pkg_init(hw, state);
4273	} else {
4274		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4275	}
4276
4277	if (!ice_is_init_pkg_successful(state)) {
4278		/* Safe Mode */
4279		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4280		return;
4281	}
4282
4283	/* Successful download package is the precondition for advanced
4284	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4285	 */
4286	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4287}
4288
4289/**
4290 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4291 * @pf: pointer to the PF structure
4292 *
4293 * There is no error returned here because the driver should be able to handle
4294 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4295 * specifically with Tx.
4296 */
4297static void ice_verify_cacheline_size(struct ice_pf *pf)
4298{
4299	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4300		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4301			 ICE_CACHE_LINE_BYTES);
4302}
4303
4304/**
4305 * ice_send_version - update firmware with driver version
4306 * @pf: PF struct
4307 *
4308 * Returns 0 on success, else error code
4309 */
4310static int ice_send_version(struct ice_pf *pf)
4311{
4312	struct ice_driver_ver dv;
4313
4314	dv.major_ver = 0xff;
4315	dv.minor_ver = 0xff;
4316	dv.build_ver = 0xff;
4317	dv.subbuild_ver = 0;
4318	strscpy((char *)dv.driver_string, UTS_RELEASE,
4319		sizeof(dv.driver_string));
4320	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4321}
4322
4323/**
4324 * ice_init_fdir - Initialize flow director VSI and configuration
4325 * @pf: pointer to the PF instance
4326 *
4327 * returns 0 on success, negative on error
4328 */
4329static int ice_init_fdir(struct ice_pf *pf)
4330{
4331	struct device *dev = ice_pf_to_dev(pf);
4332	struct ice_vsi *ctrl_vsi;
4333	int err;
4334
4335	/* Side Band Flow Director needs to have a control VSI.
4336	 * Allocate it and store it in the PF.
4337	 */
4338	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4339	if (!ctrl_vsi) {
4340		dev_dbg(dev, "could not create control VSI\n");
4341		return -ENOMEM;
4342	}
4343
4344	err = ice_vsi_open_ctrl(ctrl_vsi);
4345	if (err) {
4346		dev_dbg(dev, "could not open control VSI\n");
4347		goto err_vsi_open;
4348	}
4349
4350	mutex_init(&pf->hw.fdir_fltr_lock);
4351
4352	err = ice_fdir_create_dflt_rules(pf);
4353	if (err)
4354		goto err_fdir_rule;
4355
4356	return 0;
4357
4358err_fdir_rule:
4359	ice_fdir_release_flows(&pf->hw);
4360	ice_vsi_close(ctrl_vsi);
4361err_vsi_open:
4362	ice_vsi_release(ctrl_vsi);
4363	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4364		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4365		pf->ctrl_vsi_idx = ICE_NO_VSI;
4366	}
4367	return err;
4368}
4369
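/**
 * ice_deinit_fdir - release the flow director control VSI and resources
 * @pf: pointer to the PF instance
 */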
4370static void ice_deinit_fdir(struct ice_pf *pf)
4371{
4372	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4373
4374	if (!vsi)
4375		return;
4376
4377	ice_vsi_manage_fdir(vsi, false);
4378	ice_vsi_release(vsi);
4379	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4380		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4381		pf->ctrl_vsi_idx = ICE_NO_VSI;
4382	}
4383
4384	mutex_destroy(&pf->hw.fdir_fltr_lock);
4385}
4386
4387/**
4388 * ice_get_opt_fw_name - return optional firmware file name or NULL
4389 * @pf: pointer to the PF instance
4390 */
4391static char *ice_get_opt_fw_name(struct ice_pf *pf)
4392{
4393	/* Optional firmware name is the same as the default, with an additional
4394	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4395	 */
4396	struct pci_dev *pdev = pf->pdev;
4397	char *opt_fw_filename;
4398	u64 dsn;
4399
4400	/* Determine the name of the optional file using the DSN (two
4401	 * dwords following the start of the DSN Capability).
4402	 */
4403	dsn = pci_get_dsn(pdev);
4404	if (!dsn)
4405		return NULL;
4406
4407	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4408	if (!opt_fw_filename)
4409		return NULL;
4410
4411	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4412		 ICE_DDP_PKG_PATH, dsn);
4413
4414	return opt_fw_filename;
4415}
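
/* Editor's worked example: for a device whose DSN is 0x0011223344556677 the
 * name built above is "intel/ice/ddp/ice-0011223344556677.pkg", searched by
 * the firmware loader under the usual paths (e.g. /lib/firmware).
 */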
4416
4417/**
4418 * ice_request_fw - request and load the DDP package file
4419 * @pf: pointer to the PF instance
4420 */
4421static void ice_request_fw(struct ice_pf *pf)
4422{
4423	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4424	const struct firmware *firmware = NULL;
4425	struct device *dev = ice_pf_to_dev(pf);
4426	int err = 0;
4427
4428	/* An optional device-specific DDP (if present) overrides the default
4429	 * DDP package file. The kernel logs a debug message if the file doesn't
4430	 * exist, and warning messages for other errors.
4431	 */
4432	if (opt_fw_filename) {
4433		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4434		if (err) {
4435			kfree(opt_fw_filename);
4436			goto dflt_pkg_load;
4437		}
4438
4439		/* request for firmware was successful. Download to device */
4440		ice_load_pkg(firmware, pf);
4441		kfree(opt_fw_filename);
4442		release_firmware(firmware);
4443		return;
4444	}
4445
4446dflt_pkg_load:
4447	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4448	if (err) {
4449		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4450		return;
4451	}
4452
4453	/* request for firmware was successful. Download to device */
4454	ice_load_pkg(firmware, pf);
4455	release_firmware(firmware);
4456}
4457
4458/**
4459 * ice_print_wake_reason - show the wake up cause in the log
4460 * @pf: pointer to the PF struct
4461 */
4462static void ice_print_wake_reason(struct ice_pf *pf)
4463{
4464	u32 wus = pf->wakeup_reason;
4465	const char *wake_str;
4466
4467	/* if no wake event, nothing to print */
4468	if (!wus)
4469		return;
4470
4471	if (wus & PFPM_WUS_LNKC_M)
4472		wake_str = "Link\n";
4473	else if (wus & PFPM_WUS_MAG_M)
4474		wake_str = "Magic Packet\n";
4475	else if (wus & PFPM_WUS_MNG_M)
4476		wake_str = "Management\n";
4477	else if (wus & PFPM_WUS_FW_RST_WK_M)
4478		wake_str = "Firmware Reset\n";
4479	else
4480		wake_str = "Unknown\n";
4481
4482	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4483}
4484
4485/**
4486 * ice_pf_fwlog_update_module - update the log level of one FW log module
4487 * @pf: pointer to the PF struct
4488 * @log_level: log level to use for @module
4489 * @module: module to update
4490 */
4491void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
4492{
4493	struct ice_hw *hw = &pf->hw;
4494
4495	hw->fwlog_cfg.module_entries[module].log_level = log_level;
4496}
4497
4498/**
4499 * ice_register_netdev - register netdev
4500 * @vsi: pointer to the VSI struct
4501 */
4502static int ice_register_netdev(struct ice_vsi *vsi)
4503{
4504	int err;
4505
4506	if (!vsi || !vsi->netdev)
4507		return -EIO;
4508
4509	err = register_netdev(vsi->netdev);
4510	if (err)
4511		return err;
4512
4513	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4514	netif_carrier_off(vsi->netdev);
4515	netif_tx_stop_all_queues(vsi->netdev);
4516
4517	return 0;
4518}
4519
4520static void ice_unregister_netdev(struct ice_vsi *vsi)
4521{
4522	if (!vsi || !vsi->netdev)
4523		return;
4524
4525	unregister_netdev(vsi->netdev);
4526	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4527}
4528
4529/**
4530 * ice_cfg_netdev - Allocate, configure and register a netdev
4531 * @vsi: the VSI associated with the new netdev
4532 *
4533 * Returns 0 on success, negative value on failure
4534 */
4535static int ice_cfg_netdev(struct ice_vsi *vsi)
4536{
4537	struct ice_netdev_priv *np;
4538	struct net_device *netdev;
4539	u8 mac_addr[ETH_ALEN];
4540
4541	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4542				    vsi->alloc_rxq);
4543	if (!netdev)
4544		return -ENOMEM;
4545
4546	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4547	vsi->netdev = netdev;
4548	np = netdev_priv(netdev);
4549	np->vsi = vsi;
4550
4551	ice_set_netdev_features(netdev);
4552	ice_set_ops(vsi);
4553
4554	if (vsi->type == ICE_VSI_PF) {
4555		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4556		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4557		eth_hw_addr_set(netdev, mac_addr);
4558	}
4559
4560	netdev->priv_flags |= IFF_UNICAST_FLT;
4561
4562	/* Setup netdev TC information */
4563	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4564
4565	netdev->max_mtu = ICE_MAX_MTU;
4566
4567	return 0;
4568}
4569
4570static void ice_decfg_netdev(struct ice_vsi *vsi)
4571{
4572	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4573	free_netdev(vsi->netdev);
4574	vsi->netdev = NULL;
4575}
4576
4577/**
4578 * ice_wait_for_fw - wait for full FW readiness
4579 * @hw: pointer to the hardware structure
4580 * @timeout: milliseconds that can elapse before timing out
4581 */
4582static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4583{
4584	int fw_loading;
4585	u32 elapsed = 0;
4586
4587	while (elapsed <= timeout) {
4588		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4589
4590		/* firmware was not yet loaded, we have to wait more */
4591		if (fw_loading) {
4592			elapsed += 100;
4593			msleep(100);
4594			continue;
4595		}
4596		return 0;
4597	}
4598
4599	return -ETIMEDOUT;
4600}
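
/* Editor's sketch (hypothetical helper, illustration only): the open-coded
 * poll in ice_wait_for_fw() could also be expressed with the generic
 * read_poll_timeout() helper, assuming <linux/iopoll.h>; the driver keeps
 * the explicit loop.
 */
static int __maybe_unused ice_example_wait_for_fw(struct ice_hw *hw, u32 timeout_ms)
{
	u32 fwsm;

	/* poll GL_MNG_FWSM every 100 ms until the FW_LOADING bit clears */
	return read_poll_timeout(rd32, fwsm,
				 !(fwsm & GL_MNG_FWSM_FW_LOADING_M),
				 100 * USEC_PER_MSEC,
				 (u64)timeout_ms * USEC_PER_MSEC,
				 false, hw, GL_MNG_FWSM);
}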
4601
4602int ice_init_dev(struct ice_pf *pf)
4603{
4604	struct device *dev = ice_pf_to_dev(pf);
4605	struct ice_hw *hw = &pf->hw;
4606	int err;
4607
4608	err = ice_init_hw(hw);
4609	if (err) {
4610		dev_err(dev, "ice_init_hw failed: %d\n", err);
4611		return err;
4612	}
4613
4614	/* Some cards require longer initialization times
4615	 * due to the necessity of loading FW from an external source.
4616	 * This can take up to half a minute.
4617	 */
4618	if (ice_is_pf_c827(hw)) {
4619		err = ice_wait_for_fw(hw, 30000);
4620		if (err) {
4621			dev_err(dev, "ice_wait_for_fw timed out\n");
4622			return err;
4623		}
4624	}
4625
4626	ice_init_feature_support(pf);
4627
4628	ice_request_fw(pf);
4629
4630	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4631	 * set in pf->flags, which will cause ice_is_safe_mode to return
4632	 * true
4633	 */
4634	if (ice_is_safe_mode(pf)) {
4635		/* we already got function/device capabilities but these don't
4636		 * reflect what the driver needs to do in safe mode. Instead of
4637		 * adding conditional logic everywhere to ignore these
4638		 * device/function capabilities, override them.
4639		 */
4640		ice_set_safe_mode_caps(hw);
4641	}
4642
4643	err = ice_init_pf(pf);
4644	if (err) {
4645		dev_err(dev, "ice_init_pf failed: %d\n", err);
4646		goto err_init_pf;
4647	}
4648
4649	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4650	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4651	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4652	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4653	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4654		pf->hw.udp_tunnel_nic.tables[0].n_entries =
4655			pf->hw.tnl.valid_count[TNL_VXLAN];
4656		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4657			UDP_TUNNEL_TYPE_VXLAN;
4658	}
4659	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4660		pf->hw.udp_tunnel_nic.tables[1].n_entries =
4661			pf->hw.tnl.valid_count[TNL_GENEVE];
4662		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4663			UDP_TUNNEL_TYPE_GENEVE;
4664	}
4665
4666	err = ice_init_interrupt_scheme(pf);
4667	if (err) {
4668		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4669		err = -EIO;
4670		goto err_init_interrupt_scheme;
4671	}
4672
4673	/* In case of MSIX we are going to setup the misc vector right here
4674	 * to handle admin queue events etc. In case of legacy and MSI
4675	 * the misc functionality and queue processing is combined in
4676	 * the same vector and that gets setup at open.
4677	 */
4678	err = ice_req_irq_msix_misc(pf);
4679	if (err) {
4680		dev_err(dev, "setup of misc vector failed: %d\n", err);
4681		goto err_req_irq_msix_misc;
4682	}
4683
4684	return 0;
4685
4686err_req_irq_msix_misc:
4687	ice_clear_interrupt_scheme(pf);
4688err_init_interrupt_scheme:
4689	ice_deinit_pf(pf);
4690err_init_pf:
4691	ice_deinit_hw(hw);
4692	return err;
4693}
4694
4695void ice_deinit_dev(struct ice_pf *pf)
4696{
4697	ice_free_irq_msix_misc(pf);
4698	ice_deinit_pf(pf);
4699	ice_deinit_hw(&pf->hw);
4700
4701	/* Service task is already stopped, so call reset directly. */
4702	ice_reset(&pf->hw, ICE_RESET_PFR);
4703	pci_wait_for_pending_transaction(pf->pdev);
4704	ice_clear_interrupt_scheme(pf);
4705}
4706
4707static void ice_init_features(struct ice_pf *pf)
4708{
4709	struct device *dev = ice_pf_to_dev(pf);
4710
4711	if (ice_is_safe_mode(pf))
4712		return;
4713
4714	/* initialize DDP driven features */
4715	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4716		ice_ptp_init(pf);
4717
4718	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4719		ice_gnss_init(pf);
4720
4721	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4722	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4723		ice_dpll_init(pf);
4724
4725	/* Note: Flow director init failure is non-fatal to load */
4726	if (ice_init_fdir(pf))
4727		dev_err(dev, "could not initialize flow director\n");
4728
4729	/* Note: DCB init failure is non-fatal to load */
4730	if (ice_init_pf_dcb(pf, false)) {
4731		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4732		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4733	} else {
4734		ice_cfg_lldp_mib_change(&pf->hw, true);
4735	}
4736
4737	if (ice_init_lag(pf))
4738		dev_warn(dev, "Failed to init link aggregation support\n");
4739
4740	ice_hwmon_init(pf);
4741}
4742
4743static void ice_deinit_features(struct ice_pf *pf)
4744{
4745	if (ice_is_safe_mode(pf))
4746		return;
4747
4748	ice_deinit_lag(pf);
4749	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4750		ice_cfg_lldp_mib_change(&pf->hw, false);
4751	ice_deinit_fdir(pf);
4752	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4753		ice_gnss_exit(pf);
4754	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4755		ice_ptp_release(pf);
4756	if (test_bit(ICE_FLAG_DPLL, pf->flags))
4757		ice_dpll_deinit(pf);
4758	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4759		xa_destroy(&pf->eswitch.reprs);
4760}
4761
4762static void ice_init_wakeup(struct ice_pf *pf)
4763{
4764	/* Save wakeup reason register for later use */
4765	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4766
4767	/* check for a power management event */
4768	ice_print_wake_reason(pf);
4769
4770	/* clear wake status, all bits */
4771	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4772
4773	/* Disable WoL at init, wait for user to enable */
4774	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4775}
4776
4777static int ice_init_link(struct ice_pf *pf)
4778{
4779	struct device *dev = ice_pf_to_dev(pf);
4780	int err;
4781
4782	err = ice_init_link_events(pf->hw.port_info);
4783	if (err) {
4784		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4785		return err;
4786	}
4787
4788	/* not a fatal error if this fails */
4789	err = ice_init_nvm_phy_type(pf->hw.port_info);
4790	if (err)
4791		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4792
4793	/* not a fatal error if this fails */
4794	err = ice_update_link_info(pf->hw.port_info);
4795	if (err)
4796		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4797
4798	ice_init_link_dflt_override(pf->hw.port_info);
4799
4800	ice_check_link_cfg_err(pf,
4801			       pf->hw.port_info->phy.link_info.link_cfg_err);
4802
4803	/* if media available, initialize PHY settings */
4804	if (pf->hw.port_info->phy.link_info.link_info &
4805	    ICE_AQ_MEDIA_AVAILABLE) {
4806		/* not a fatal error if this fails */
4807		err = ice_init_phy_user_cfg(pf->hw.port_info);
4808		if (err)
4809			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4810
4811		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4812			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4813
4814			if (vsi)
4815				ice_configure_phy(vsi);
4816		}
4817	} else {
4818		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4819	}
4820
4821	return err;
4822}
4823
4824static int ice_init_pf_sw(struct ice_pf *pf)
4825{
4826	bool dvm = ice_is_dvm_ena(&pf->hw);
4827	struct ice_vsi *vsi;
4828	int err;
4829
4830	/* create switch struct for the switch element created by FW on boot */
4831	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4832	if (!pf->first_sw)
4833		return -ENOMEM;
4834
4835	if (pf->hw.evb_veb)
4836		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4837	else
4838		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4839
4840	pf->first_sw->pf = pf;
4841
4842	/* record the sw_id available for later use */
4843	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4844
4845	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4846	if (err)
4847		goto err_aq_set_port_params;
4848
4849	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4850	if (!vsi) {
4851		err = -ENOMEM;
4852		goto err_pf_vsi_setup;
4853	}
4854
4855	return 0;
4856
4857err_pf_vsi_setup:
4858err_aq_set_port_params:
4859	kfree(pf->first_sw);
4860	return err;
4861}
4862
4863static void ice_deinit_pf_sw(struct ice_pf *pf)
4864{
4865	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4866
4867	if (!vsi)
4868		return;
4869
4870	ice_vsi_release(vsi);
4871	kfree(pf->first_sw);
4872}
4873
4874static int ice_alloc_vsis(struct ice_pf *pf)
4875{
4876	struct device *dev = ice_pf_to_dev(pf);
4877
4878	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4879	if (!pf->num_alloc_vsi)
4880		return -EIO;
4881
4882	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4883		dev_warn(dev,
4884			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4885			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4886		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4887	}
4888
4889	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4890			       GFP_KERNEL);
4891	if (!pf->vsi)
4892		return -ENOMEM;
4893
4894	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4895				     sizeof(*pf->vsi_stats), GFP_KERNEL);
4896	if (!pf->vsi_stats) {
4897		devm_kfree(dev, pf->vsi);
4898		return -ENOMEM;
4899	}
4900
4901	return 0;
4902}
4903
4904static void ice_dealloc_vsis(struct ice_pf *pf)
4905{
4906	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4907	pf->vsi_stats = NULL;
4908
4909	pf->num_alloc_vsi = 0;
4910	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4911	pf->vsi = NULL;
4912}
4913
4914static int ice_init_devlink(struct ice_pf *pf)
4915{
4916	int err;
4917
4918	err = ice_devlink_register_params(pf);
4919	if (err)
4920		return err;
4921
4922	ice_devlink_init_regions(pf);
4923	ice_devlink_register(pf);
4924
4925	return 0;
4926}
4927
4928static void ice_deinit_devlink(struct ice_pf *pf)
4929{
4930	ice_devlink_unregister(pf);
4931	ice_devlink_destroy_regions(pf);
4932	ice_devlink_unregister_params(pf);
4933}
4934
4935static int ice_init(struct ice_pf *pf)
4936{
4937	int err;
4938
4939	err = ice_init_dev(pf);
4940	if (err)
4941		return err;
4942
4943	err = ice_alloc_vsis(pf);
4944	if (err)
4945		goto err_alloc_vsis;
4946
4947	err = ice_init_pf_sw(pf);
4948	if (err)
4949		goto err_init_pf_sw;
4950
4951	ice_init_wakeup(pf);
4952
4953	err = ice_init_link(pf);
4954	if (err)
4955		goto err_init_link;
4956
4957	err = ice_send_version(pf);
4958	if (err)
4959		goto err_init_link;
4960
4961	ice_verify_cacheline_size(pf);
4962
4963	if (ice_is_safe_mode(pf))
4964		ice_set_safe_mode_vlan_cfg(pf);
4965	else
4966		/* print PCI link speed and width */
4967		pcie_print_link_status(pf->pdev);
4968
4969	/* ready to go, so clear down state bit */
4970	clear_bit(ICE_DOWN, pf->state);
4971	clear_bit(ICE_SERVICE_DIS, pf->state);
4972
4973	/* since everything is good, start the service timer */
4974	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4975
4976	return 0;
4977
4978err_init_link:
4979	ice_deinit_pf_sw(pf);
4980err_init_pf_sw:
4981	ice_dealloc_vsis(pf);
4982err_alloc_vsis:
4983	ice_deinit_dev(pf);
4984	return err;
4985}
4986
4987static void ice_deinit(struct ice_pf *pf)
4988{
4989	set_bit(ICE_SERVICE_DIS, pf->state);
4990	set_bit(ICE_DOWN, pf->state);
4991
4992	ice_deinit_pf_sw(pf);
4993	ice_dealloc_vsis(pf);
4994	ice_deinit_dev(pf);
4995}
4996
4997/**
4998 * ice_load - load the PF by initializing HW and starting the main VSI
4999 * @pf: pointer to the pf instance
5000 *
5001 * This function has to be called under devl_lock.
5002 */
5003int ice_load(struct ice_pf *pf)
5004{
5005	struct ice_vsi *vsi;
5006	int err;
5007
5008	devl_assert_locked(priv_to_devlink(pf));
5009
5010	vsi = ice_get_main_vsi(pf);
5011
5012	/* init channel list */
5013	INIT_LIST_HEAD(&vsi->ch_list);
5014
5015	err = ice_cfg_netdev(vsi);
5016	if (err)
5017		return err;
5018
5019	/* Setup DCB netlink interface */
5020	ice_dcbnl_setup(vsi);
5021
5022	err = ice_init_mac_fltr(pf);
5023	if (err)
5024		goto err_init_mac_fltr;
5025
5026	err = ice_devlink_create_pf_port(pf);
5027	if (err)
5028		goto err_devlink_create_pf_port;
5029
5030	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5031
5032	err = ice_register_netdev(vsi);
5033	if (err)
5034		goto err_register_netdev;
5035
5036	err = ice_tc_indir_block_register(vsi);
5037	if (err)
5038		goto err_tc_indir_block_register;
5039
5040	ice_napi_add(vsi);
5041
5042	err = ice_init_rdma(pf);
5043	if (err)
5044		goto err_init_rdma;
5045
5046	ice_init_features(pf);
5047	ice_service_task_restart(pf);
5048
5049	clear_bit(ICE_DOWN, pf->state);
5050
5051	return 0;
5052
5053err_init_rdma:
5054	ice_tc_indir_block_unregister(vsi);
5055err_tc_indir_block_register:
5056	ice_unregister_netdev(vsi);
5057err_register_netdev:
5058	ice_devlink_destroy_pf_port(pf);
5059err_devlink_create_pf_port:
5060err_init_mac_fltr:
5061	ice_decfg_netdev(vsi);
5062	return err;
5063}
5064
5065/**
5066 * ice_unload - unload the PF by stopping the VSI and de-initializing HW
5067 * @pf: pointer to the pf instance
5068 *
5069 * This function has to be called under devl_lock.
5070 */
5071void ice_unload(struct ice_pf *pf)
5072{
5073	struct ice_vsi *vsi = ice_get_main_vsi(pf);
5074
5075	devl_assert_locked(priv_to_devlink(pf));
5076
5077	ice_deinit_features(pf);
5078	ice_deinit_rdma(pf);
5079	ice_tc_indir_block_unregister(vsi);
5080	ice_unregister_netdev(vsi);
5081	ice_devlink_destroy_pf_port(pf);
5082	ice_decfg_netdev(vsi);
5083}
5084
5085/**
5086 * ice_probe - Device initialization routine
5087 * @pdev: PCI device information struct
5088 * @ent: entry in ice_pci_tbl
5089 *
5090 * Returns 0 on success, negative on failure
5091 */
5092static int
5093ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5094{
5095	struct device *dev = &pdev->dev;
5096	struct ice_pf *pf;
5097	struct ice_hw *hw;
5098	int err;
5099
5100	if (pdev->is_virtfn) {
5101		dev_err(dev, "can't probe a virtual function\n");
5102		return -EINVAL;
5103	}
5104
5105	/* When running under a kdump kernel, initiate a reset before enabling
5106	 * the device in order to clear out any pending DMA transactions. These
5107	 * transactions can cause some systems to machine check when doing
5108	 * the pcim_enable_device() below.
5109	 */
5110	if (is_kdump_kernel()) {
5111		pci_save_state(pdev);
5112		pci_clear_master(pdev);
5113		err = pcie_flr(pdev);
5114		if (err)
5115			return err;
5116		pci_restore_state(pdev);
5117	}
5118
5119	/* this driver uses devres, see
5120	 * Documentation/driver-api/driver-model/devres.rst
5121	 */
5122	err = pcim_enable_device(pdev);
5123	if (err)
5124		return err;
5125
5126	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5127	if (err) {
5128		dev_err(dev, "BAR0 I/O map error %d\n", err);
5129		return err;
5130	}
5131
5132	pf = ice_allocate_pf(dev);
5133	if (!pf)
5134		return -ENOMEM;
5135
5136	/* initialize Auxiliary index to invalid value */
5137	pf->aux_idx = -1;
5138
5139	/* set up for high or low DMA */
5140	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5141	if (err) {
5142		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5143		return err;
5144	}
5145
5146	pci_set_master(pdev);
5147
5148	pf->pdev = pdev;
5149	pci_set_drvdata(pdev, pf);
5150	set_bit(ICE_DOWN, pf->state);
5151	/* Disable service task until DOWN bit is cleared */
5152	set_bit(ICE_SERVICE_DIS, pf->state);
5153
5154	hw = &pf->hw;
5155	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5156	pci_save_state(pdev);
5157
5158	hw->back = pf;
5159	hw->port_info = NULL;
5160	hw->vendor_id = pdev->vendor;
5161	hw->device_id = pdev->device;
5162	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5163	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5164	hw->subsystem_device_id = pdev->subsystem_device;
5165	hw->bus.device = PCI_SLOT(pdev->devfn);
5166	hw->bus.func = PCI_FUNC(pdev->devfn);
5167	ice_set_ctrlq_len(hw);
5168
5169	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5170
5171#ifndef CONFIG_DYNAMIC_DEBUG
5172	if (debug < -1)
5173		hw->debug_mask = debug;
5174#endif
5175
5176	err = ice_init(pf);
5177	if (err)
5178		goto err_init;
5179
5180	devl_lock(priv_to_devlink(pf));
5181	err = ice_load(pf);
5182	devl_unlock(priv_to_devlink(pf));
5183	if (err)
5184		goto err_load;
5185
5186	err = ice_init_devlink(pf);
5187	if (err)
5188		goto err_init_devlink;
5189
5190	return 0;
5191
5192err_init_devlink:
5193	devl_lock(priv_to_devlink(pf));
5194	ice_unload(pf);
5195	devl_unlock(priv_to_devlink(pf));
5196err_load:
5197	ice_deinit(pf);
5198err_init:
5199	pci_disable_device(pdev);
5200	return err;
5201}
5202
5203/**
5204 * ice_set_wake - enable or disable Wake on LAN
5205 * @pf: pointer to the PF struct
5206 *
5207 * Simple helper for WoL control
5208 */
5209static void ice_set_wake(struct ice_pf *pf)
5210{
5211	struct ice_hw *hw = &pf->hw;
5212	bool wol = pf->wol_ena;
5213
5214	/* clear wake state, otherwise new wake events won't fire */
5215	wr32(hw, PFPM_WUS, U32_MAX);
5216
5217	/* enable / disable APM wake up, no RMW needed */
5218	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5219
5220	/* set magic packet filter enabled */
5221	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5222}
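
/* Editor's note (illustrative): pf->wol_ena is driven from user space via
 * the ethtool WoL op, e.g.:
 *
 *	ethtool -s eth0 wol g   # wake on magic packet
 *	ethtool -s eth0 wol d   # disable wake-up
 *
 * the registers above are then written on suspend, shutdown and remove.
 */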
5223
5224/**
5225 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5226 * @pf: pointer to the PF struct
5227 *
5228 * Issue firmware command to enable multicast magic wake, making
5229 * sure that any locally administered address (LAA) is used for
5230 * wake, and that PF reset doesn't undo the LAA.
5231 */
5232static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5233{
5234	struct device *dev = ice_pf_to_dev(pf);
5235	struct ice_hw *hw = &pf->hw;
5236	u8 mac_addr[ETH_ALEN];
5237	struct ice_vsi *vsi;
5238	int status;
5239	u8 flags;
5240
5241	if (!pf->wol_ena)
5242		return;
5243
5244	vsi = ice_get_main_vsi(pf);
5245	if (!vsi)
5246		return;
5247
5248	/* Get current MAC address in case it's an LAA */
5249	if (vsi->netdev)
5250		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5251	else
5252		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5253
5254	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5255		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5256		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5257
5258	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5259	if (status)
5260		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5261			status, ice_aq_str(hw->adminq.sq_last_status));
5262}
5263
5264/**
5265 * ice_remove - Device removal routine
5266 * @pdev: PCI device information struct
5267 */
5268static void ice_remove(struct pci_dev *pdev)
5269{
5270	struct ice_pf *pf = pci_get_drvdata(pdev);
5271	int i;
5272
5273	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5274		if (!ice_is_reset_in_progress(pf->state))
5275			break;
5276		msleep(100);
5277	}
5278
5279	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5280		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5281		ice_free_vfs(pf);
5282	}
5283
5284	ice_hwmon_exit(pf);
5285
5286	ice_service_task_stop(pf);
5287	ice_aq_cancel_waiting_tasks(pf);
5288	set_bit(ICE_DOWN, pf->state);
5289
5290	if (!ice_is_safe_mode(pf))
5291		ice_remove_arfs(pf);
5292
5293	ice_deinit_devlink(pf);
5294
5295	devl_lock(priv_to_devlink(pf));
5296	ice_unload(pf);
5297	devl_unlock(priv_to_devlink(pf));
5298
5299	ice_deinit(pf);
5300	ice_vsi_release_all(pf);
5301
5302	ice_setup_mc_magic_wake(pf);
5303	ice_set_wake(pf);
5304
5305	pci_disable_device(pdev);
5306}
5307
5308/**
5309 * ice_shutdown - PCI callback for shutting down device
5310 * @pdev: PCI device information struct
5311 */
5312static void ice_shutdown(struct pci_dev *pdev)
5313{
5314	struct ice_pf *pf = pci_get_drvdata(pdev);
5315
5316	ice_remove(pdev);
5317
5318	if (system_state == SYSTEM_POWER_OFF) {
5319		pci_wake_from_d3(pdev, pf->wol_ena);
5320		pci_set_power_state(pdev, PCI_D3hot);
5321	}
5322}
5323
5324#ifdef CONFIG_PM
5325/**
5326 * ice_prepare_for_shutdown - prep for PCI shutdown
5327 * @pf: board private structure
5328 *
5329 * Inform or close all dependent features in prep for PCI device shutdown
5330 */
5331static void ice_prepare_for_shutdown(struct ice_pf *pf)
5332{
5333	struct ice_hw *hw = &pf->hw;
5334	u32 v;
5335
5336	/* Notify VFs of impending reset */
5337	if (ice_check_sq_alive(hw, &hw->mailboxq))
5338		ice_vc_notify_reset(pf);
5339
5340	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5341
5342	/* disable the VSIs and their queues that are not already DOWN */
5343	ice_pf_dis_all_vsi(pf, false);
5344
5345	ice_for_each_vsi(pf, v)
5346		if (pf->vsi[v])
5347			pf->vsi[v]->vsi_num = 0;
5348
5349	ice_shutdown_all_ctrlq(hw);
5350}
5351
5352/**
5353 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5354 * @pf: board private structure to reinitialize
5355 *
5356 * This routine reinitialize interrupt scheme that was cleared during
5357 * power management suspend callback.
5358 *
5359 * This should be called during resume routine to re-allocate the q_vectors
5360 * and reacquire interrupts.
5361 */
5362static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5363{
5364	struct device *dev = ice_pf_to_dev(pf);
5365	int ret, v;
5366
5367	/* Since we clear MSIX flag during suspend, we need to
5368	 * set it back during resume...
5369	 */
5370
5371	ret = ice_init_interrupt_scheme(pf);
5372	if (ret) {
5373		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5374		return ret;
5375	}
5376
5377	/* Remap vectors and rings, after successful re-init interrupts */
5378	ice_for_each_vsi(pf, v) {
5379		if (!pf->vsi[v])
5380			continue;
5381
5382		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5383		if (ret)
5384			goto err_reinit;
5385		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5386		ice_vsi_set_napi_queues(pf->vsi[v]);
5387	}
5388
5389	ret = ice_req_irq_msix_misc(pf);
5390	if (ret) {
5391		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5392			ret);
5393		goto err_reinit;
5394	}
5395
5396	return 0;
5397
5398err_reinit:
5399	while (v--)
5400		if (pf->vsi[v])
5401			ice_vsi_free_q_vectors(pf->vsi[v]);
5402
5403	return ret;
5404}
5405
5406/**
5407 * ice_suspend - PM suspend callback
5408 * @dev: generic device information structure
5409 *
5410 * Power Management callback to quiesce the device and prepare
5411 * for D3 transition.
5412 */
5413static int __maybe_unused ice_suspend(struct device *dev)
5414{
5415	struct pci_dev *pdev = to_pci_dev(dev);
5416	struct ice_pf *pf;
5417	int disabled, v;
5418
5419	pf = pci_get_drvdata(pdev);
5420
5421	if (!ice_pf_state_is_nominal(pf)) {
5422		dev_err(dev, "Device is not ready, no need to suspend it\n");
5423		return -EBUSY;
5424	}
5425
5426	/* Stop watchdog tasks until resume completion.
5427	 * Even though it is most likely that the service task is
5428	 * disabled if the device is suspended or down, the service task's
5429	 * state is controlled by a different state bit, and we should
5430	 * store and honor whatever state that bit is in at this point.
5431	 */
5432	disabled = ice_service_task_stop(pf);
5433
5434	ice_unplug_aux_dev(pf);
5435
5436	/* Already suspended? Then there is nothing to do */
5437	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5438		if (!disabled)
5439			ice_service_task_restart(pf);
5440		return 0;
5441	}
5442
5443	if (test_bit(ICE_DOWN, pf->state) ||
5444	    ice_is_reset_in_progress(pf->state)) {
5445		dev_err(dev, "can't suspend device in reset or already down\n");
5446		if (!disabled)
5447			ice_service_task_restart(pf);
5448		return 0;
5449	}
5450
5451	ice_setup_mc_magic_wake(pf);
5452
5453	ice_prepare_for_shutdown(pf);
5454
5455	ice_set_wake(pf);
5456
5457	/* Free vectors, clear the interrupt scheme and release IRQs
5458	 * for proper hibernation, especially with large number of CPUs.
5459	 * Otherwise hibernation might fail when mapping all the vectors back
5460	 * to CPU0.
5461	 */
5462	ice_free_irq_msix_misc(pf);
5463	ice_for_each_vsi(pf, v) {
5464		if (!pf->vsi[v])
5465			continue;
5466		ice_vsi_free_q_vectors(pf->vsi[v]);
5467	}
5468	ice_clear_interrupt_scheme(pf);
5469
5470	pci_save_state(pdev);
5471	pci_wake_from_d3(pdev, pf->wol_ena);
5472	pci_set_power_state(pdev, PCI_D3hot);
5473	return 0;
5474}
5475
5476/**
5477 * ice_resume - PM callback for waking up from D3
5478 * @dev: generic device information structure
5479 */
5480static int __maybe_unused ice_resume(struct device *dev)
5481{
5482	struct pci_dev *pdev = to_pci_dev(dev);
5483	enum ice_reset_req reset_type;
5484	struct ice_pf *pf;
5485	struct ice_hw *hw;
5486	int ret;
5487
5488	pci_set_power_state(pdev, PCI_D0);
5489	pci_restore_state(pdev);
5490	pci_save_state(pdev);
5491
5492	if (!pci_device_is_present(pdev))
5493		return -ENODEV;
5494
5495	ret = pci_enable_device_mem(pdev);
5496	if (ret) {
5497		dev_err(dev, "Cannot enable device after suspend\n");
5498		return ret;
5499	}
5500
5501	pf = pci_get_drvdata(pdev);
5502	hw = &pf->hw;
5503
5504	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5505	ice_print_wake_reason(pf);
5506
5507	/* We cleared the interrupt scheme when we suspended, so we need to
5508	 * restore it now to resume device functionality.
5509	 */
5510	ret = ice_reinit_interrupt_scheme(pf);
5511	if (ret)
5512		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5513
5514	clear_bit(ICE_DOWN, pf->state);
5515	/* Now perform PF reset and rebuild */
5516	reset_type = ICE_RESET_PFR;
5517	/* re-enable service task for reset, but allow reset to schedule it */
5518	clear_bit(ICE_SERVICE_DIS, pf->state);
5519
5520	if (ice_schedule_reset(pf, reset_type))
5521		dev_err(dev, "Reset during resume failed.\n");
5522
5523	clear_bit(ICE_SUSPENDED, pf->state);
5524	ice_service_task_restart(pf);
5525
5526	/* Restart the service task */
5527	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5528
5529	return 0;
5530}
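
/* Illustrative only: with CONFIG_PM these two callbacks are wired up through
 * ice_pm_ops below and run during a system sleep transition, for example
 * (commands are a sketch, not part of the driver):
 *
 *   # echo mem > /sys/power/state     suspend path; ice_suspend() runs
 *   (wake event arrives)              resume path; ice_resume() runs
 */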
5531#endif /* CONFIG_PM */
5532
5533/**
5534 * ice_pci_err_detected - warning that PCI error has been detected
5535 * @pdev: PCI device information struct
5536 * @err: the type of PCI error
5537 *
5538 * Called to warn that something happened on the PCI bus and the error handling
5539 * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5540 */
5541static pci_ers_result_t
5542ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5543{
5544	struct ice_pf *pf = pci_get_drvdata(pdev);
5545
5546	if (!pf) {
5547		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5548			__func__, err);
5549		return PCI_ERS_RESULT_DISCONNECT;
5550	}
5551
5552	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5553		ice_service_task_stop(pf);
5554
5555		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5556			set_bit(ICE_PFR_REQ, pf->state);
5557			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5558		}
5559	}
5560
5561	return PCI_ERS_RESULT_NEED_RESET;
5562}
5563
5564/**
5565 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5566 * @pdev: PCI device information struct
5567 *
5568 * Called to determine if the driver can recover from the PCI slot reset by
5569 * using a register read to determine if the device is recoverable.
5570 */
5571static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5572{
5573	struct ice_pf *pf = pci_get_drvdata(pdev);
5574	pci_ers_result_t result;
5575	int err;
5576	u32 reg;
5577
5578	err = pci_enable_device_mem(pdev);
5579	if (err) {
5580		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5581			err);
5582		result = PCI_ERS_RESULT_DISCONNECT;
5583	} else {
5584		pci_set_master(pdev);
5585		pci_restore_state(pdev);
5586		pci_save_state(pdev);
5587		pci_wake_from_d3(pdev, false);
5588
5589		/* Check for life */
5590		reg = rd32(&pf->hw, GLGEN_RTRIG);
5591		if (!reg)
5592			result = PCI_ERS_RESULT_RECOVERED;
5593		else
5594			result = PCI_ERS_RESULT_DISCONNECT;
5595	}
5596
5597	return result;
5598}
5599
5600/**
5601 * ice_pci_err_resume - restart operations after PCI error recovery
5602 * @pdev: PCI device information struct
5603 *
5604 * Called to allow the driver to bring things back up after PCI error and/or
5605 * reset recovery have finished
5606 */
5607static void ice_pci_err_resume(struct pci_dev *pdev)
5608{
5609	struct ice_pf *pf = pci_get_drvdata(pdev);
5610
5611	if (!pf) {
5612		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5613			__func__);
5614		return;
5615	}
5616
5617	if (test_bit(ICE_SUSPENDED, pf->state)) {
5618		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5619			__func__);
5620		return;
5621	}
5622
5623	ice_restore_all_vfs_msi_state(pf);
5624
5625	ice_do_reset(pf, ICE_RESET_PFR);
5626	ice_service_task_restart(pf);
5627	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5628}
5629
5630/**
5631 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5632 * @pdev: PCI device information struct
5633 */
5634static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5635{
5636	struct ice_pf *pf = pci_get_drvdata(pdev);
5637
5638	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5639		ice_service_task_stop(pf);
5640
5641		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5642			set_bit(ICE_PFR_REQ, pf->state);
5643			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5644		}
5645	}
5646}
5647
5648/**
5649 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5650 * @pdev: PCI device information struct
5651 */
5652static void ice_pci_err_reset_done(struct pci_dev *pdev)
5653{
5654	ice_pci_err_resume(pdev);
5655}
5656
5657/* ice_pci_tbl - PCI Device ID Table
5658 *
5659 * Wildcard entries (PCI_ANY_ID) should come last
5660 * Last entry must be all 0s
5661 *
5662 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5663 *   Class, Class Mask, private data (not used) }
5664 */
5665static const struct pci_device_id ice_pci_tbl[] = {
5666	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5667	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5668	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5669	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5670	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5671	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5672	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5673	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5674	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5675	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5676	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5677	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5678	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5679	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5680	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5681	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5682	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5683	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5684	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5685	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5686	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5687	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5688	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5689	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5690	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5691	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5692	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5693	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5694	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5695	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5696	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
5697	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
5698	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
5699	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
5700	/* required last entry */
5701	{}
5702};
5703MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
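
/* Illustrative only: MODULE_DEVICE_TABLE() exports the IDs above as modalias
 * strings so userspace can autoload this module when a matching PCI function
 * appears. One way to inspect the generated match patterns (assuming the
 * module is installed):
 *
 *   $ modinfo ice | grep '^alias:.*pci'
 */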
5704
5705static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5706
5707static const struct pci_error_handlers ice_pci_err_handler = {
5708	.error_detected = ice_pci_err_detected,
5709	.slot_reset = ice_pci_err_slot_reset,
5710	.reset_prepare = ice_pci_err_reset_prepare,
5711	.reset_done = ice_pci_err_reset_done,
5712	.resume = ice_pci_err_resume
5713};
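
/* Illustrative only: per Documentation/PCI/pci-error-recovery.rst, AER
 * recovery invokes these hooks roughly in the order
 *   error_detected() -> slot_reset() -> resume(),
 * while reset_prepare()/reset_done() instead bracket resets initiated
 * elsewhere (e.g. an FLR requested through sysfs).
 */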
5714
5715static struct pci_driver ice_driver = {
5716	.name = KBUILD_MODNAME,
5717	.id_table = ice_pci_tbl,
5718	.probe = ice_probe,
5719	.remove = ice_remove,
5720#ifdef CONFIG_PM
5721	.driver.pm = &ice_pm_ops,
5722#endif /* CONFIG_PM */
5723	.shutdown = ice_shutdown,
5724	.sriov_configure = ice_sriov_configure,
5725	.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5726	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5727	.err_handler = &ice_pci_err_handler
5728};
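
/* Illustrative only: .sriov_configure is reached from the standard PCI sysfs
 * attribute, e.g. (the device address below is hypothetical):
 *
 *   # echo 4 > /sys/bus/pci/devices/0000:18:00.0/sriov_numvfs
 */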
5729
5730/**
5731 * ice_module_init - Driver registration routine
5732 *
5733 * ice_module_init is the first routine called when the driver is
5734 * loaded. All it does is register with the PCI subsystem.
5735 */
5736static int __init ice_module_init(void)
5737{
5738	int status = -ENOMEM;
5739
5740	pr_info("%s\n", ice_driver_string);
5741	pr_info("%s\n", ice_copyright);
5742
5743	ice_adv_lnk_speed_maps_init();
5744
5745	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5746	if (!ice_wq) {
5747		pr_err("Failed to create workqueue\n");
5748		return status;
5749	}
5750
5751	ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5752	if (!ice_lag_wq) {
5753		pr_err("Failed to create LAG workqueue\n");
5754		goto err_dest_wq;
5755	}
5756
5757	ice_debugfs_init();
5758
5759	status = pci_register_driver(&ice_driver);
5760	if (status) {
5761		pr_err("failed to register PCI driver, err %d\n", status);
5762		goto err_dest_lag_wq;
5763	}
5764
5765	return 0;
5766
5767err_dest_lag_wq:
5768	destroy_workqueue(ice_lag_wq);
5769	ice_debugfs_exit();
5770err_dest_wq:
5771	destroy_workqueue(ice_wq);
5772	return status;
5773}
5774module_init(ice_module_init);
5775
5776/**
5777 * ice_module_exit - Driver exit cleanup routine
5778 *
5779 * ice_module_exit is called just before the driver is removed
5780 * from memory.
5781 */
5782static void __exit ice_module_exit(void)
5783{
5784	pci_unregister_driver(&ice_driver);
5785	ice_debugfs_exit();
5786	destroy_workqueue(ice_wq);
5787	destroy_workqueue(ice_lag_wq);
5788	pr_info("module unloaded\n");
5789}
5790module_exit(ice_module_exit);
5791
5792/**
5793 * ice_set_mac_address - NDO callback to set MAC address
5794 * @netdev: network interface device structure
5795 * @pi: pointer to an address structure
5796 *
5797 * Returns 0 on success, negative on failure
5798 */
5799static int ice_set_mac_address(struct net_device *netdev, void *pi)
5800{
5801	struct ice_netdev_priv *np = netdev_priv(netdev);
5802	struct ice_vsi *vsi = np->vsi;
5803	struct ice_pf *pf = vsi->back;
5804	struct ice_hw *hw = &pf->hw;
5805	struct sockaddr *addr = pi;
5806	u8 old_mac[ETH_ALEN];
5807	u8 flags = 0;
5808	u8 *mac;
5809	int err;
5810
5811	mac = (u8 *)addr->sa_data;
5812
5813	if (!is_valid_ether_addr(mac))
5814		return -EADDRNOTAVAIL;
5815
5816	if (test_bit(ICE_DOWN, pf->state) ||
5817	    ice_is_reset_in_progress(pf->state)) {
5818		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5819			   mac);
5820		return -EBUSY;
5821	}
5822
5823	if (ice_chnl_dmac_fltr_cnt(pf)) {
5824		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5825			   mac);
5826		return -EAGAIN;
5827	}
5828
5829	netif_addr_lock_bh(netdev);
5830	ether_addr_copy(old_mac, netdev->dev_addr);
5831	/* change the netdev's MAC address */
5832	eth_hw_addr_set(netdev, mac);
5833	netif_addr_unlock_bh(netdev);
5834
5835	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5836	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5837	if (err && err != -ENOENT) {
5838		err = -EADDRNOTAVAIL;
5839		goto err_update_filters;
5840	}
5841
5842	/* Add filter for new MAC. If filter exists, return success */
5843	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5844	if (err == -EEXIST) {
5845		/* Although this MAC filter is already present in hardware it's
5846		 * possible in some cases (e.g. bonding) that dev_addr was
5847		 * modified outside of the driver and needs to be restored back
5848		 * to this value.
5849		 */
5850		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5851
5852		return 0;
5853	} else if (err) {
5854		/* error if the new filter addition failed */
5855		err = -EADDRNOTAVAIL;
5856	}
5857
5858err_update_filters:
5859	if (err) {
5860		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5861			   mac);
5862		netif_addr_lock_bh(netdev);
5863		eth_hw_addr_set(netdev, old_mac);
5864		netif_addr_unlock_bh(netdev);
5865		return err;
5866	}
5867
5868	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5869		   netdev->dev_addr);
5870
5871	/* write new MAC address to the firmware */
5872	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5873	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5874	if (err) {
5875		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5876			   mac, err);
5877	}
5878	return 0;
5879}
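
/* Illustrative only: this NDO is reached via dev_set_mac_address(), e.g.
 * from iproute2 (interface name and the locally administered address are
 * hypothetical):
 *
 *   # ip link set dev eth0 address 02:11:22:33:44:55
 */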
5880
5881/**
5882 * ice_set_rx_mode - NDO callback to set the netdev filters
5883 * @netdev: network interface device structure
5884 */
5885static void ice_set_rx_mode(struct net_device *netdev)
5886{
5887	struct ice_netdev_priv *np = netdev_priv(netdev);
5888	struct ice_vsi *vsi = np->vsi;
5889
5890	if (!vsi || ice_is_switchdev_running(vsi->back))
5891		return;
5892
5893	/* Set the flags to synchronize filters
5894	 * ndo_set_rx_mode may be triggered even without a change in netdev
5895	 * flags
5896	 */
5897	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5898	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5899	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5900
5901	/* schedule our worker thread which will take care of
5902	 * applying the new filter changes
5903	 */
5904	ice_service_task_schedule(vsi->back);
5905}
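
/* Illustrative only: the stack calls this NDO whenever the address lists or
 * interface flags may have changed, e.g. when toggling promiscuous mode
 * (interface name is hypothetical):
 *
 *   # ip link set dev eth0 promisc on
 */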
5906
5907/**
5908 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5909 * @netdev: network interface device structure
5910 * @queue_index: Queue ID
5911 * @maxrate: maximum bandwidth in Mbps
5912 */
5913static int
5914ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5915{
5916	struct ice_netdev_priv *np = netdev_priv(netdev);
5917	struct ice_vsi *vsi = np->vsi;
5918	u16 q_handle;
5919	int status;
5920	u8 tc;
5921
5922	/* Validate maxrate requested is within permitted range */
5923	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5924		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5925			   maxrate, queue_index);
5926		return -EINVAL;
5927	}
5928
5929	q_handle = vsi->tx_rings[queue_index]->q_handle;
5930	tc = ice_dcb_get_tc(vsi, queue_index);
5931
5932	vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5933	if (!vsi) {
5934		netdev_err(netdev, "Invalid VSI for given queue %d\n",
5935			   queue_index);
5936		return -EINVAL;
5937	}
5938
5939	/* Set BW back to default, when user set maxrate to 0 */
5940	if (!maxrate)
5941		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5942					       q_handle, ICE_MAX_BW);
5943	else
5944		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5945					  q_handle, ICE_MAX_BW, maxrate * 1000);
5946	if (status)
5947		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5948			   status);
5949
5950	return status;
5951}
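
/* Illustrative only: this NDO backs the per-queue tx_maxrate sysfs attribute
 * (values in Mbps, 0 restores the default limit; names are hypothetical):
 *
 *   # echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */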
5952
5953/**
5954 * ice_fdb_add - add an entry to the hardware database
5955 * @ndm: the input from the stack
5956 * @tb: pointer to array of nladdr (unused)
5957 * @dev: the net device pointer
5958 * @addr: the MAC address entry being added
5959 * @vid: VLAN ID
5960 * @flags: instructions from stack about fdb operation
5961 * @extack: netlink extended ack
5962 */
5963static int
5964ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5965	    struct net_device *dev, const unsigned char *addr, u16 vid,
5966	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5967{
5968	int err;
5969
5970	if (vid) {
5971		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5972		return -EINVAL;
5973	}
5974	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5975		netdev_err(dev, "FDB only supports static addresses\n");
5976		return -EINVAL;
5977	}
5978
5979	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5980		err = dev_uc_add_excl(dev, addr);
5981	else if (is_multicast_ether_addr(addr))
5982		err = dev_mc_add_excl(dev, addr);
5983	else
5984		err = -EINVAL;
5985
5986	/* Only return duplicate errors if NLM_F_EXCL is set */
5987	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5988		err = 0;
5989
5990	return err;
5991}
5992
5993/**
5994 * ice_fdb_del - delete an entry from the hardware database
5995 * @ndm: the input from the stack
5996 * @tb: pointer to array of nladdr (unused)
5997 * @dev: the net device pointer
5998 * @addr: the MAC address entry being removed
5999 * @vid: VLAN ID
6000 * @extack: netlink extended ack
6001 */
6002static int
6003ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6004	    struct net_device *dev, const unsigned char *addr,
6005	    __always_unused u16 vid, struct netlink_ext_ack *extack)
6006{
6007	int err;
6008
6009	if (ndm->ndm_state & NUD_PERMANENT) {
6010		netdev_err(dev, "FDB only supports static addresses\n");
6011		return -EINVAL;
6012	}
6013
6014	if (is_unicast_ether_addr(addr))
6015		err = dev_uc_del(dev, addr);
6016	else if (is_multicast_ether_addr(addr))
6017		err = dev_mc_del(dev, addr);
6018	else
6019		err = -EINVAL;
6020
6021	return err;
6022}
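
/* Illustrative only: these FDB NDOs are reached through the netlink FDB
 * operations, e.g. via iproute2 ('self' targets the device itself rather
 * than a bridge; the address, interface, and state flag are hypothetical):
 *
 *   # bridge fdb add 02:11:22:33:44:55 dev eth0 self permanent
 */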
6023
6024#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6025					 NETIF_F_HW_VLAN_CTAG_TX | \
6026					 NETIF_F_HW_VLAN_STAG_RX | \
6027					 NETIF_F_HW_VLAN_STAG_TX)
6028
6029#define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6030					 NETIF_F_HW_VLAN_STAG_RX)
6031
6032#define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
6033					 NETIF_F_HW_VLAN_STAG_FILTER)
6034
6035/**
6036 * ice_fix_features - fix the netdev features flags based on device limitations
6037 * @netdev: ptr to the netdev that flags are being fixed on
6038 * @features: features that need to be checked and possibly fixed
6039 *
6040 * Make sure any fixups are made to features in this callback. This enables the
6041 * driver to not have to check unsupported configurations throughout the driver
6042 * because that's the responsibility of this callback.
6043 *
6044 * Single VLAN Mode (SVM) Supported Features:
6045 *	NETIF_F_HW_VLAN_CTAG_FILTER
6046 *	NETIF_F_HW_VLAN_CTAG_RX
6047 *	NETIF_F_HW_VLAN_CTAG_TX
6048 *
6049 * Double VLAN Mode (DVM) Supported Features:
6050 *	NETIF_F_HW_VLAN_CTAG_FILTER
6051 *	NETIF_F_HW_VLAN_CTAG_RX
6052 *	NETIF_F_HW_VLAN_CTAG_TX
6053 *
6054 *	NETIF_F_HW_VLAN_STAG_FILTER
6055 *	NETIF_F_HW_VLAN_STAG_RX
6056 *	NETIF_F_HW_VLAN_STAG_TX
6057 *
6058 * Features that need fixing:
6059 *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6060 *	These are mutually exclusive as the VSI context cannot support multiple
6061 *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
6062 *	is not done, then default to clearing the requested STAG offload
6063 *	settings.
6064 *
6065 *	All supported filtering has to be enabled or disabled together. For
6066 *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6067 *	together. If this is not done, then default to VLAN filtering disabled.
6068 *	These are mutually exclusive as there is currently no way to
6069 *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6070 *	prune rules.
6071 */
6072static netdev_features_t
6073ice_fix_features(struct net_device *netdev, netdev_features_t features)
6074{
6075	struct ice_netdev_priv *np = netdev_priv(netdev);
6076	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6077	bool cur_ctag, cur_stag, req_ctag, req_stag;
6078
6079	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6080	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6081	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6082
6083	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6084	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6085	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6086
6087	if (req_vlan_fltr != cur_vlan_fltr) {
6088		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6089			if (req_ctag && req_stag) {
6090				features |= NETIF_VLAN_FILTERING_FEATURES;
6091			} else if (!req_ctag && !req_stag) {
6092				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6093			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
6094				   (!cur_stag && req_stag && !cur_ctag)) {
6095				features |= NETIF_VLAN_FILTERING_FEATURES;
6096				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6097			} else if ((cur_ctag && !req_ctag && cur_stag) ||
6098				   (cur_stag && !req_stag && cur_ctag)) {
6099				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6100				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6101			}
6102		} else {
6103			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6104				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6105
6106			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6107				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6108		}
6109	}
6110
6111	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6112	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6113		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6114		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6115			      NETIF_F_HW_VLAN_STAG_TX);
6116	}
6117
6118	if (!(netdev->features & NETIF_F_RXFCS) &&
6119	    (features & NETIF_F_RXFCS) &&
6120	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6121	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
6122		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6123		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6124	}
6125
6126	return features;
6127}
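
/* Illustrative only: the fixups above are exercised through the netdev
 * feature ioctls, e.g. with ethtool (interface name is hypothetical); in DVM,
 * requesting only one of the two filtering bits gets corrected here so both
 * move together:
 *
 *   # ethtool -K eth0 rx-vlan-filter on
 */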
6128
6129/**
6130 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6131 * @vsi: PF's VSI
6132 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6133 *
6134 * Store current stripped VLAN proto in ring packet context,
6135 * so it can be accessed more efficiently by packet processing code.
6136 */
6137static void
6138ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6139{
6140	u16 i;
6141
6142	ice_for_each_alloc_rxq(vsi, i)
6143		vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6144}
6145
6146/**
6147 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6148 * @vsi: PF's VSI
6149 * @features: features used to determine VLAN offload settings
6150 *
6151 * First, determine the vlan_ethertype based on the VLAN offload bits in
6152 * features. Then determine if stripping and insertion should be enabled or
6153 * disabled. Finally enable or disable VLAN stripping and insertion.
6154 */
6155static int
6156ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6157{
6158	bool enable_stripping = true, enable_insertion = true;
6159	struct ice_vsi_vlan_ops *vlan_ops;
6160	int strip_err = 0, insert_err = 0;
6161	u16 vlan_ethertype = 0;
6162
6163	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6164
6165	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6166		vlan_ethertype = ETH_P_8021AD;
6167	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6168		vlan_ethertype = ETH_P_8021Q;
6169
6170	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6171		enable_stripping = false;
6172	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6173		enable_insertion = false;
6174
6175	if (enable_stripping)
6176		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6177	else
6178		strip_err = vlan_ops->dis_stripping(vsi);
6179
6180	if (enable_insertion)
6181		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6182	else
6183		insert_err = vlan_ops->dis_insertion(vsi);
6184
6185	if (strip_err || insert_err)
6186		return -EIO;
6187
6188	ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6189				    htons(vlan_ethertype) : 0);
6190
6191	return 0;
6192}
6193
6194/**
6195 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6196 * @vsi: PF's VSI
6197 * @features: features used to determine VLAN filtering settings
6198 *
6199 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6200 * features.
6201 */
6202static int
6203ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6204{
6205	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6206	int err = 0;
6207
6208	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6209	 * if either bit is set
6210	 */
6211	if (features &
6212	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6213		err = vlan_ops->ena_rx_filtering(vsi);
6214	else
6215		err = vlan_ops->dis_rx_filtering(vsi);
6216
6217	return err;
6218}
6219
6220/**
6221 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6222 * @netdev: ptr to the netdev being adjusted
6223 * @features: the feature set that the stack is suggesting
6224 *
6225 * Only update VLAN settings if the requested_vlan_features are different than
6226 * the current_vlan_features.
6227 */
6228static int
6229ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6230{
6231	netdev_features_t current_vlan_features, requested_vlan_features;
6232	struct ice_netdev_priv *np = netdev_priv(netdev);
6233	struct ice_vsi *vsi = np->vsi;
6234	int err;
6235
6236	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6237	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6238	if (current_vlan_features ^ requested_vlan_features) {
6239		if ((features & NETIF_F_RXFCS) &&
6240		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6241			dev_err(ice_pf_to_dev(vsi->back),
6242				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6243			return -EIO;
6244		}
6245
6246		err = ice_set_vlan_offload_features(vsi, features);
6247		if (err)
6248			return err;
6249	}
6250
6251	current_vlan_features = netdev->features &
6252		NETIF_VLAN_FILTERING_FEATURES;
6253	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6254	if (current_vlan_features ^ requested_vlan_features) {
6255		err = ice_set_vlan_filtering_features(vsi, features);
6256		if (err)
6257			return err;
6258	}
6259
6260	return 0;
6261}
6262
6263/**
6264 * ice_set_loopback - turn on/off loopback mode on underlying PF
6265 * @vsi: ptr to VSI
6266 * @ena: flag to indicate the on/off setting
6267 */
6268static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6269{
6270	bool if_running = netif_running(vsi->netdev);
6271	int ret;
6272
6273	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6274		ret = ice_down(vsi);
6275		if (ret) {
6276			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6277			return ret;
6278		}
6279	}
6280	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6281	if (ret)
6282		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6283	if (if_running)
6284		ret = ice_up(vsi);
6285
6286	return ret;
6287}
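
/* Illustrative only: NETIF_F_LOOPBACK is exposed as the "loopback" feature
 * string, so MAC loopback can be toggled with (interface is hypothetical):
 *
 *   # ethtool -K eth0 loopback on
 */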
6288
6289/**
6290 * ice_set_features - set the netdev feature flags
6291 * @netdev: ptr to the netdev being adjusted
6292 * @features: the feature set that the stack is suggesting
6293 */
6294static int
6295ice_set_features(struct net_device *netdev, netdev_features_t features)
6296{
6297	netdev_features_t changed = netdev->features ^ features;
6298	struct ice_netdev_priv *np = netdev_priv(netdev);
6299	struct ice_vsi *vsi = np->vsi;
6300	struct ice_pf *pf = vsi->back;
6301	int ret = 0;
6302
6303	/* Don't set any netdev advanced features with device in Safe Mode */
6304	if (ice_is_safe_mode(pf)) {
6305		dev_err(ice_pf_to_dev(pf),
6306			"Device is in Safe Mode - not enabling advanced netdev features\n");
6307		return ret;
6308	}
6309
6310	/* Do not change setting during reset */
6311	if (ice_is_reset_in_progress(pf->state)) {
6312		dev_err(ice_pf_to_dev(pf),
6313			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6314		return -EBUSY;
6315	}
6316
6317	/* Multiple features can be changed in one call so keep features in
6318	 * separate if/else statements to guarantee each feature is checked
6319	 */
6320	if (changed & NETIF_F_RXHASH)
6321		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6322
6323	ret = ice_set_vlan_features(netdev, features);
6324	if (ret)
6325		return ret;
6326
6327	/* Turn on receive of FCS aka CRC, and after setting this
6328	 * flag the packet data will have the 4 byte CRC appended
6329	 */
6330	if (changed & NETIF_F_RXFCS) {
6331		if ((features & NETIF_F_RXFCS) &&
6332		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6333			dev_err(ice_pf_to_dev(vsi->back),
6334				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6335			return -EIO;
6336		}
6337
6338		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6339		ret = ice_down_up(vsi);
6340		if (ret)
6341			return ret;
6342	}
6343
6344	if (changed & NETIF_F_NTUPLE) {
6345		bool ena = !!(features & NETIF_F_NTUPLE);
6346
6347		ice_vsi_manage_fdir(vsi, ena);
6348		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6349	}
6350
6351	/* don't turn off hw_tc_offload when ADQ is already enabled */
6352	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6353		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6354		return -EACCES;
6355	}
6356
6357	if (changed & NETIF_F_HW_TC) {
6358		bool ena = !!(features & NETIF_F_HW_TC);
6359
6360		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6361		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6362	}
6363
6364	if (changed & NETIF_F_LOOPBACK)
6365		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6366
6367	return ret;
6368}
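
/* Illustrative only: NETIF_F_RXFCS corresponds to the "rx-fcs" feature
 * string; keeping the CRC on received frames can be requested with
 * (interface is hypothetical):
 *
 *   # ethtool -K eth0 rx-fcs on
 */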
6369
6370/**
6371 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6372 * @vsi: VSI to setup VLAN properties for
6373 */
6374static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6375{
6376	int err;
6377
6378	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6379	if (err)
6380		return err;
6381
6382	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6383	if (err)
6384		return err;
6385
6386	return ice_vsi_add_vlan_zero(vsi);
6387}
6388
6389/**
6390 * ice_vsi_cfg_lan - Setup the VSI lan related config
6391 * @vsi: the VSI being configured
6392 *
6393 * Return 0 on success and negative value on error
6394 */
6395int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6396{
6397	int err;
6398
6399	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6400		ice_set_rx_mode(vsi->netdev);
6401
6402		err = ice_vsi_vlan_setup(vsi);
6403		if (err)
6404			return err;
6405	}
6406	ice_vsi_cfg_dcb_rings(vsi);
6407
6408	err = ice_vsi_cfg_lan_txqs(vsi);
6409	if (!err && ice_is_xdp_ena_vsi(vsi))
6410		err = ice_vsi_cfg_xdp_txqs(vsi);
6411	if (!err)
6412		err = ice_vsi_cfg_rxqs(vsi);
6413
6414	return err;
6415}
6416
6417/* THEORY OF MODERATION:
6418 * The ice driver hardware works differently than the hardware that DIMLIB was
6419 * originally made for. ice hardware doesn't have packet count limits that
6420 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6421 * which is hard-coded to a limit of 250,000 ints/second.
6422 * If not using dynamic moderation, the INTRL value can be modified
6423 * by ethtool rx-usecs-high.
6424 */
6425struct ice_dim {
6426	/* the throttle rate for interrupts, basically the worst-case delay before
6427	 * an initial interrupt fires; the value is stored in microseconds.
6428	 */
6429	u16 itr;
6430};
6431
6432/* Make a different profile for Rx that doesn't allow quite so aggressive
6433 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6434 * second).
6435 */
6436static const struct ice_dim rx_profile[] = {
6437	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6438	{8},    /* 125,000 ints/s */
6439	{16},   /*  62,500 ints/s */
6440	{62},   /*  16,129 ints/s */
6441	{126}   /*   7,936 ints/s */
6442};
6443
6444/* The transmit profile, which has the same sorts of values
6445 * as the previous struct
6446 */
6447static const struct ice_dim tx_profile[] = {
6448	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6449	{8},    /* 125,000 ints/s */
6450	{40},   /*  25,000 ints/s */
6451	{128},  /*   7,812 ints/s */
6452	{256}   /*   3,906 ints/s */
6453};
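
/* Illustrative only: with the ITR value stored in microseconds, the steady
 * state interrupt rate is roughly 1,000,000 / itr. For example, the default
 * profile index of 1 selects 8 us, i.e. about 125,000 ints/s, while the 2 us
 * entries would allow 500,000 ints/s were they not capped at 250,000 by the
 * hardware interrupt rate limit (INTRL).
 */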
6454
6455static void ice_tx_dim_work(struct work_struct *work)
6456{
6457	struct ice_ring_container *rc;
6458	struct dim *dim;
6459	u16 itr;
6460
6461	dim = container_of(work, struct dim, work);
6462	rc = dim->priv;
6463
6464	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6465
6466	/* look up the values in our local table */
6467	itr = tx_profile[dim->profile_ix].itr;
6468
6469	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6470	ice_write_itr(rc, itr);
6471
6472	dim->state = DIM_START_MEASURE;
6473}
6474
6475static void ice_rx_dim_work(struct work_struct *work)
6476{
6477	struct ice_ring_container *rc;
6478	struct dim *dim;
6479	u16 itr;
6480
6481	dim = container_of(work, struct dim, work);
6482	rc = dim->priv;
6483
6484	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6485
6486	/* look up the values in our local table */
6487	itr = rx_profile[dim->profile_ix].itr;
6488
6489	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6490	ice_write_itr(rc, itr);
6491
6492	dim->state = DIM_START_MEASURE;
6493}
6494
6495#define ICE_DIM_DEFAULT_PROFILE_IX 1
6496
6497/**
6498 * ice_init_moderation - set up interrupt moderation
6499 * @q_vector: the vector containing rings to be configured
6500 *
6501 * Set up interrupt moderation registers, with the intent to do the right thing
6502 * when called from reset or from probe, and whether or not dynamic moderation
6503 * is enabled or not. Take special care to write all the registers in both
6504 * dynamic moderation mode or not in order to make sure hardware is in a known
6505 * state.
6506 */
6507static void ice_init_moderation(struct ice_q_vector *q_vector)
6508{
6509	struct ice_ring_container *rc;
6510	bool tx_dynamic, rx_dynamic;
6511
6512	rc = &q_vector->tx;
6513	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6514	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6515	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6516	rc->dim.priv = rc;
6517	tx_dynamic = ITR_IS_DYNAMIC(rc);
6518
6519	/* set the initial TX ITR to match the above */
6520	ice_write_itr(rc, tx_dynamic ?
6521		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6522
6523	rc = &q_vector->rx;
6524	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6525	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6526	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6527	rc->dim.priv = rc;
6528	rx_dynamic = ITR_IS_DYNAMIC(rc);
6529
6530	/* set the initial RX ITR to match the above */
6531	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6532				       rc->itr_setting);
6533
6534	ice_set_q_vector_intrl(q_vector);
6535}
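
/* Illustrative only: whether the profiles above are used at all depends on
 * ITR_IS_DYNAMIC(), i.e. on adaptive moderation, which can be toggled with
 * (interface name is hypothetical):
 *
 *   # ethtool -C eth0 adaptive-rx on adaptive-tx on
 */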
6536
6537/**
6538 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6539 * @vsi: the VSI being configured
6540 */
6541static void ice_napi_enable_all(struct ice_vsi *vsi)
6542{
6543	int q_idx;
6544
6545	if (!vsi->netdev)
6546		return;
6547
6548	ice_for_each_q_vector(vsi, q_idx) {
6549		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6550
6551		ice_init_moderation(q_vector);
6552
6553		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6554			napi_enable(&q_vector->napi);
6555	}
6556}
6557
6558/**
6559 * ice_up_complete - Finish the last steps of bringing up a connection
6560 * @vsi: The VSI being configured
6561 *
6562 * Return 0 on success and negative value on error
6563 */
6564static int ice_up_complete(struct ice_vsi *vsi)
6565{
6566	struct ice_pf *pf = vsi->back;
6567	int err;
6568
6569	ice_vsi_cfg_msix(vsi);
6570
6571	/* Enable only Rx rings, Tx rings were enabled by the FW when the
6572	 * Tx queue group list was configured and the context bits were
6573	 * programmed using ice_vsi_cfg_txqs
6574	 */
6575	err = ice_vsi_start_all_rx_rings(vsi);
6576	if (err)
6577		return err;
6578
6579	clear_bit(ICE_VSI_DOWN, vsi->state);
6580	ice_napi_enable_all(vsi);
6581	ice_vsi_ena_irq(vsi);
6582
6583	if (vsi->port_info &&
6584	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6585	    vsi->netdev && vsi->type == ICE_VSI_PF) {
6586		ice_print_link_msg(vsi, true);
6587		netif_tx_start_all_queues(vsi->netdev);
6588		netif_carrier_on(vsi->netdev);
6589		ice_ptp_link_change(pf, pf->hw.pf_id, true);
6590	}
6591
6592	/* Perform an initial read of the statistics registers now to
6593	 * set the baseline so counters are ready when interface is up
6594	 */
6595	ice_update_eth_stats(vsi);
6596
6597	if (vsi->type == ICE_VSI_PF)
6598		ice_service_task_schedule(pf);
6599
6600	return 0;
6601}
6602
6603/**
6604 * ice_up - Bring the connection back up after being down
6605 * @vsi: VSI being configured
6606 */
6607int ice_up(struct ice_vsi *vsi)
6608{
6609	int err;
6610
6611	err = ice_vsi_cfg_lan(vsi);
6612	if (!err)
6613		err = ice_up_complete(vsi);
6614
6615	return err;
6616}
6617
6618/**
6619 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6620 * @syncp: pointer to u64_stats_sync
6621 * @stats: stats that pkts and bytes count will be taken from
6622 * @pkts: packets stats counter
6623 * @bytes: bytes stats counter
6624 *
6625 * This function fetches stats from the ring, considering the atomic operations
6626 * that need to be performed to read u64 values on a 32-bit machine.
6627 */
6628void
6629ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6630			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6631{
6632	unsigned int start;
6633
6634	do {
6635		start = u64_stats_fetch_begin(syncp);
6636		*pkts = stats.pkts;
6637		*bytes = stats.bytes;
6638	} while (u64_stats_fetch_retry(syncp, start));
6639}
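
/* Illustrative only: on 64-bit kernels u64_stats_fetch_begin()/_retry()
 * compile down to plain reads, so the loop above runs once; on 32-bit
 * kernels the seqcount forces a re-read if a writer updated the counters
 * mid-fetch, which is what prevents torn 64-bit values.
 */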
6640
6641/**
6642 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6643 * @vsi: the VSI to be updated
6644 * @vsi_stats: the stats struct to be updated
6645 * @rings: rings to work on
6646 * @count: number of rings
6647 */
6648static void
6649ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6650			     struct rtnl_link_stats64 *vsi_stats,
6651			     struct ice_tx_ring **rings, u16 count)
6652{
6653	u16 i;
6654
6655	for (i = 0; i < count; i++) {
6656		struct ice_tx_ring *ring;
6657		u64 pkts = 0, bytes = 0;
6658
6659		ring = READ_ONCE(rings[i]);
6660		if (!ring || !ring->ring_stats)
6661			continue;
6662		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6663					     ring->ring_stats->stats, &pkts,
6664					     &bytes);
6665		vsi_stats->tx_packets += pkts;
6666		vsi_stats->tx_bytes += bytes;
6667		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6668		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6669		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6670	}
6671}
6672
6673/**
6674 * ice_update_vsi_ring_stats - Update VSI stats counters
6675 * @vsi: the VSI to be updated
6676 */
6677static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6678{
6679	struct rtnl_link_stats64 *net_stats, *stats_prev;
6680	struct rtnl_link_stats64 *vsi_stats;
6681	struct ice_pf *pf = vsi->back;
6682	u64 pkts, bytes;
6683	int i;
6684
6685	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6686	if (!vsi_stats)
6687		return;
6688
6689	/* reset non-netdev (extended) stats */
6690	vsi->tx_restart = 0;
6691	vsi->tx_busy = 0;
6692	vsi->tx_linearize = 0;
6693	vsi->rx_buf_failed = 0;
6694	vsi->rx_page_failed = 0;
6695
6696	rcu_read_lock();
6697
6698	/* update Tx rings counters */
6699	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6700				     vsi->num_txq);
6701
6702	/* update Rx rings counters */
6703	ice_for_each_rxq(vsi, i) {
6704		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6705		struct ice_ring_stats *ring_stats;
6706
6707		ring_stats = ring->ring_stats;
6708		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6709					     ring_stats->stats, &pkts,
6710					     &bytes);
6711		vsi_stats->rx_packets += pkts;
6712		vsi_stats->rx_bytes += bytes;
6713		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6714		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6715	}
6716
6717	/* update XDP Tx rings counters */
6718	if (ice_is_xdp_ena_vsi(vsi))
6719		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6720					     vsi->num_xdp_txq);
6721
6722	rcu_read_unlock();
6723
6724	net_stats = &vsi->net_stats;
6725	stats_prev = &vsi->net_stats_prev;
6726
6727	/* Update netdev counters, but keep in mind that values could start at a
6728	 * random value after PF reset. And as we increase the reported stat by the
6729	 * diff of Cur - Prev, we need to be sure that Prev is valid. If it's not,
6730	 * let's skip this round.
6731	 */
6732	if (likely(pf->stat_prev_loaded)) {
6733		net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6734		net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6735		net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6736		net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6737	}
6738
6739	stats_prev->tx_packets = vsi_stats->tx_packets;
6740	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6741	stats_prev->rx_packets = vsi_stats->rx_packets;
6742	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6743
6744	kfree(vsi_stats);
6745}
6746
6747/**
6748 * ice_update_vsi_stats - Update VSI stats counters
6749 * @vsi: the VSI to be updated
6750 */
6751void ice_update_vsi_stats(struct ice_vsi *vsi)
6752{
6753	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6754	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6755	struct ice_pf *pf = vsi->back;
6756
6757	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6758	    test_bit(ICE_CFG_BUSY, pf->state))
6759		return;
6760
6761	/* get stats as recorded by Tx/Rx rings */
6762	ice_update_vsi_ring_stats(vsi);
6763
6764	/* get VSI stats as recorded by the hardware */
6765	ice_update_eth_stats(vsi);
6766
6767	cur_ns->tx_errors = cur_es->tx_errors;
6768	cur_ns->rx_dropped = cur_es->rx_discards;
6769	cur_ns->tx_dropped = cur_es->tx_discards;
6770	cur_ns->multicast = cur_es->rx_multicast;
6771
6772	/* update some more netdev stats if this is main VSI */
6773	if (vsi->type == ICE_VSI_PF) {
6774		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6775		cur_ns->rx_errors = pf->stats.crc_errors +
6776				    pf->stats.illegal_bytes +
6777				    pf->stats.rx_undersize +
6778				    pf->hw_csum_rx_error +
6779				    pf->stats.rx_jabber +
6780				    pf->stats.rx_fragments +
6781				    pf->stats.rx_oversize;
6782		/* record drops from the port level */
6783		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6784	}
6785}
6786
6787/**
6788 * ice_update_pf_stats - Update PF port stats counters
6789 * @pf: PF whose stats need to be updated
6790 */
6791void ice_update_pf_stats(struct ice_pf *pf)
6792{
6793	struct ice_hw_port_stats *prev_ps, *cur_ps;
6794	struct ice_hw *hw = &pf->hw;
6795	u16 fd_ctr_base;
6796	u8 port;
6797
6798	port = hw->port_info->lport;
6799	prev_ps = &pf->stats_prev;
6800	cur_ps = &pf->stats;
6801
6802	if (ice_is_reset_in_progress(pf->state))
6803		pf->stat_prev_loaded = false;
6804
6805	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6806			  &prev_ps->eth.rx_bytes,
6807			  &cur_ps->eth.rx_bytes);
6808
6809	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6810			  &prev_ps->eth.rx_unicast,
6811			  &cur_ps->eth.rx_unicast);
6812
6813	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6814			  &prev_ps->eth.rx_multicast,
6815			  &cur_ps->eth.rx_multicast);
6816
6817	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6818			  &prev_ps->eth.rx_broadcast,
6819			  &cur_ps->eth.rx_broadcast);
6820
6821	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6822			  &prev_ps->eth.rx_discards,
6823			  &cur_ps->eth.rx_discards);
6824
6825	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6826			  &prev_ps->eth.tx_bytes,
6827			  &cur_ps->eth.tx_bytes);
6828
6829	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6830			  &prev_ps->eth.tx_unicast,
6831			  &cur_ps->eth.tx_unicast);
6832
6833	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6834			  &prev_ps->eth.tx_multicast,
6835			  &cur_ps->eth.tx_multicast);
6836
6837	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6838			  &prev_ps->eth.tx_broadcast,
6839			  &cur_ps->eth.tx_broadcast);
6840
6841	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6842			  &prev_ps->tx_dropped_link_down,
6843			  &cur_ps->tx_dropped_link_down);
6844
6845	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6846			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6847
6848	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6849			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6850
6851	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6852			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6853
6854	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6855			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6856
6857	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6858			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6859
6860	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6861			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6862
6863	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6864			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6865
6866	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6867			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6868
6869	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6870			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6871
6872	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6873			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6874
6875	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6876			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6877
6878	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6879			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6880
6881	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6882			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6883
6884	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6885			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6886
6887	fd_ctr_base = hw->fd_ctr_base;
6888
6889	ice_stat_update40(hw,
6890			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6891			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6892			  &cur_ps->fd_sb_match);
6893	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6894			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6895
6896	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6897			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6898
6899	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6900			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6901
6902	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6903			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6904
6905	ice_update_dcb_stats(pf);
6906
6907	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6908			  &prev_ps->crc_errors, &cur_ps->crc_errors);
6909
6910	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6911			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6912
6913	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6914			  &prev_ps->mac_local_faults,
6915			  &cur_ps->mac_local_faults);
6916
6917	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6918			  &prev_ps->mac_remote_faults,
6919			  &cur_ps->mac_remote_faults);
6920
6921	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6922			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6923
6924	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6925			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6926
6927	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6928			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6929
6930	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6931			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6932
6933	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6934
6935	pf->stat_prev_loaded = true;
6936}
6937
6938/**
6939 * ice_get_stats64 - get statistics for network device structure
6940 * @netdev: network interface device structure
6941 * @stats: main device statistics structure
6942 */
6943static
6944void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6945{
6946	struct ice_netdev_priv *np = netdev_priv(netdev);
6947	struct rtnl_link_stats64 *vsi_stats;
6948	struct ice_vsi *vsi = np->vsi;
6949
6950	vsi_stats = &vsi->net_stats;
6951
6952	if (!vsi->num_txq || !vsi->num_rxq)
6953		return;
6954
6955	/* netdev packet/byte stats come from ring counters. These are obtained
6956	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6957	 * But, only call the update routine and read the registers if VSI is
6958	 * not down.
6959	 */
6960	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6961		ice_update_vsi_ring_stats(vsi);
6962	stats->tx_packets = vsi_stats->tx_packets;
6963	stats->tx_bytes = vsi_stats->tx_bytes;
6964	stats->rx_packets = vsi_stats->rx_packets;
6965	stats->rx_bytes = vsi_stats->rx_bytes;
6966
6967	/* The rest of the stats can be read from the hardware but instead we
6968	 * just return values that the watchdog task has already obtained from
6969	 * the hardware.
6970	 */
6971	stats->multicast = vsi_stats->multicast;
6972	stats->tx_errors = vsi_stats->tx_errors;
6973	stats->tx_dropped = vsi_stats->tx_dropped;
6974	stats->rx_errors = vsi_stats->rx_errors;
6975	stats->rx_dropped = vsi_stats->rx_dropped;
6976	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6977	stats->rx_length_errors = vsi_stats->rx_length_errors;
6978}
6979
6980/**
6981 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6982 * @vsi: VSI having NAPI disabled
6983 */
6984static void ice_napi_disable_all(struct ice_vsi *vsi)
6985{
6986	int q_idx;
6987
6988	if (!vsi->netdev)
6989		return;
6990
6991	ice_for_each_q_vector(vsi, q_idx) {
6992		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6993
6994		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6995			napi_disable(&q_vector->napi);
6996
6997		cancel_work_sync(&q_vector->tx.dim.work);
6998		cancel_work_sync(&q_vector->rx.dim.work);
6999	}
7000}
7001
7002/**
7003 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7004 * @vsi: the VSI being un-configured
7005 */
7006static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7007{
7008	struct ice_pf *pf = vsi->back;
7009	struct ice_hw *hw = &pf->hw;
7010	u32 val;
7011	int i;
7012
7013	/* disable interrupt causation from each Rx queue; Tx queues are
7014	 * handled in ice_vsi_stop_tx_ring()
7015	 */
7016	if (vsi->rx_rings) {
7017		ice_for_each_rxq(vsi, i) {
7018			if (vsi->rx_rings[i]) {
7019				u16 reg;
7020
7021				reg = vsi->rx_rings[i]->reg_idx;
7022				val = rd32(hw, QINT_RQCTL(reg));
7023				val &= ~QINT_RQCTL_CAUSE_ENA_M;
7024				wr32(hw, QINT_RQCTL(reg), val);
7025			}
7026		}
7027	}
7028
7029	/* disable each interrupt */
7030	ice_for_each_q_vector(vsi, i) {
7031		if (!vsi->q_vectors[i])
7032			continue;
7033		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7034	}
7035
7036	ice_flush(hw);
7037
7038	/* don't call synchronize_irq() for VF's from the host */
7039	if (vsi->type == ICE_VSI_VF)
7040		return;
7041
7042	ice_for_each_q_vector(vsi, i)
7043		synchronize_irq(vsi->q_vectors[i]->irq.virq);
7044}
7045
7046/**
7047 * ice_down - Shutdown the connection
7048 * @vsi: The VSI being stopped
7049 *
7050 * Caller of this function is expected to set the vsi->state ICE_DOWN bit
7051 */
7052int ice_down(struct ice_vsi *vsi)
7053{
7054	int i, tx_err, rx_err, vlan_err = 0;
7055
7056	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7057
7058	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
7059		vlan_err = ice_vsi_del_vlan_zero(vsi);
7060		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
7061		netif_carrier_off(vsi->netdev);
7062		netif_tx_disable(vsi->netdev);
7063	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
7064		ice_eswitch_stop_all_tx_queues(vsi->back);
7065	}
7066
7067	ice_vsi_dis_irq(vsi);
7068
7069	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7070	if (tx_err)
7071		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7072			   vsi->vsi_num, tx_err);
7073	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
7074		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7075		if (tx_err)
7076			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7077				   vsi->vsi_num, tx_err);
7078	}
7079
7080	rx_err = ice_vsi_stop_all_rx_rings(vsi);
7081	if (rx_err)
7082		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7083			   vsi->vsi_num, rx_err);
7084
7085	ice_napi_disable_all(vsi);
7086
7087	ice_for_each_txq(vsi, i)
7088		ice_clean_tx_ring(vsi->tx_rings[i]);
7089
7090	if (ice_is_xdp_ena_vsi(vsi))
7091		ice_for_each_xdp_txq(vsi, i)
7092			ice_clean_tx_ring(vsi->xdp_rings[i]);
7093
7094	ice_for_each_rxq(vsi, i)
7095		ice_clean_rx_ring(vsi->rx_rings[i]);
7096
7097	if (tx_err || rx_err || vlan_err) {
7098		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7099			   vsi->vsi_num, vsi->vsw->sw_id);
7100		return -EIO;
7101	}
7102
7103	return 0;
7104}
7105
7106/**
7107 * ice_down_up - shutdown the VSI connection and bring it up
7108 * @vsi: the VSI to be reconnected
7109 */
7110int ice_down_up(struct ice_vsi *vsi)
7111{
7112	int ret;
7113
7114	/* if DOWN already set, nothing to do */
7115	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7116		return 0;
7117
7118	ret = ice_down(vsi);
7119	if (ret)
7120		return ret;
7121
7122	ret = ice_up(vsi);
7123	if (ret) {
7124		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7125		return ret;
7126	}
7127
7128	return 0;
7129}
7130
7131/**
7132 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7133 * @vsi: VSI having resources allocated
7134 *
7135 * Return 0 on success, negative on failure
7136 */
7137int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7138{
7139	int i, err = 0;
7140
7141	if (!vsi->num_txq) {
7142		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7143			vsi->vsi_num);
7144		return -EINVAL;
7145	}
7146
7147	ice_for_each_txq(vsi, i) {
7148		struct ice_tx_ring *ring = vsi->tx_rings[i];
7149
7150		if (!ring)
7151			return -EINVAL;
7152
7153		if (vsi->netdev)
7154			ring->netdev = vsi->netdev;
7155		err = ice_setup_tx_ring(ring);
7156		if (err)
7157			break;
7158	}
7159
7160	return err;
7161}
7162
7163/**
7164 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7165 * @vsi: VSI having resources allocated
7166 *
7167 * Return 0 on success, negative on failure
7168 */
7169int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7170{
7171	int i, err = 0;
7172
7173	if (!vsi->num_rxq) {
7174		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7175			vsi->vsi_num);
7176		return -EINVAL;
7177	}
7178
7179	ice_for_each_rxq(vsi, i) {
7180		struct ice_rx_ring *ring = vsi->rx_rings[i];
7181
7182		if (!ring)
7183			return -EINVAL;
7184
7185		if (vsi->netdev)
7186			ring->netdev = vsi->netdev;
7187		err = ice_setup_rx_ring(ring);
7188		if (err)
7189			break;
7190	}
7191
7192	return err;
7193}
7194
7195/**
7196 * ice_vsi_open_ctrl - open control VSI for use
7197 * @vsi: the VSI to open
7198 *
7199 * Initialization of the Control VSI
7200 *
7201 * Returns 0 on success, negative value on error
7202 */
7203int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7204{
7205	char int_name[ICE_INT_NAME_STR_LEN];
7206	struct ice_pf *pf = vsi->back;
7207	struct device *dev;
7208	int err;
7209
7210	dev = ice_pf_to_dev(pf);
7211	/* allocate descriptors */
7212	err = ice_vsi_setup_tx_rings(vsi);
7213	if (err)
7214		goto err_setup_tx;
7215
7216	err = ice_vsi_setup_rx_rings(vsi);
7217	if (err)
7218		goto err_setup_rx;
7219
7220	err = ice_vsi_cfg_lan(vsi);
7221	if (err)
7222		goto err_setup_rx;
7223
7224	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7225		 dev_driver_string(dev), dev_name(dev));
7226	err = ice_vsi_req_irq_msix(vsi, int_name);
7227	if (err)
7228		goto err_setup_rx;
7229
7230	ice_vsi_cfg_msix(vsi);
7231
7232	err = ice_vsi_start_all_rx_rings(vsi);
7233	if (err)
7234		goto err_up_complete;
7235
7236	clear_bit(ICE_VSI_DOWN, vsi->state);
7237	ice_vsi_ena_irq(vsi);
7238
7239	return 0;
7240
7241err_up_complete:
7242	ice_down(vsi);
7243err_setup_rx:
7244	ice_vsi_free_rx_rings(vsi);
7245err_setup_tx:
7246	ice_vsi_free_tx_rings(vsi);
7247
7248	return err;
7249}
7250
7251/**
7252 * ice_vsi_open - Called when a network interface is made active
7253 * @vsi: the VSI to open
7254 *
7255 * Initialization of the VSI
7256 *
7257 * Returns 0 on success, negative value on error
7258 */
7259int ice_vsi_open(struct ice_vsi *vsi)
7260{
7261	char int_name[ICE_INT_NAME_STR_LEN];
7262	struct ice_pf *pf = vsi->back;
7263	int err;
7264
7265	/* allocate descriptors */
7266	err = ice_vsi_setup_tx_rings(vsi);
7267	if (err)
7268		goto err_setup_tx;
7269
7270	err = ice_vsi_setup_rx_rings(vsi);
7271	if (err)
7272		goto err_setup_rx;
7273
7274	err = ice_vsi_cfg_lan(vsi);
7275	if (err)
7276		goto err_setup_rx;
7277
7278	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7279		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7280	err = ice_vsi_req_irq_msix(vsi, int_name);
7281	if (err)
7282		goto err_setup_rx;
7283
7284	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7285
7286	if (vsi->type == ICE_VSI_PF) {
7287		/* Notify the stack of the actual queue counts. */
7288		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7289		if (err)
7290			goto err_set_qs;
7291
7292		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7293		if (err)
7294			goto err_set_qs;
7295	}
7296
7297	err = ice_up_complete(vsi);
7298	if (err)
7299		goto err_up_complete;
7300
7301	return 0;
7302
7303err_up_complete:
7304	ice_down(vsi);
7305err_set_qs:
7306	ice_vsi_free_irq(vsi);
7307err_setup_rx:
7308	ice_vsi_free_rx_rings(vsi);
7309err_setup_tx:
7310	ice_vsi_free_tx_rings(vsi);
7311
7312	return err;
7313}
7314
7315/**
7316 * ice_vsi_release_all - Delete all VSIs
7317 * @pf: PF from which all VSIs are being removed
7318 */
7319static void ice_vsi_release_all(struct ice_pf *pf)
7320{
7321	int err, i;
7322
7323	if (!pf->vsi)
7324		return;
7325
7326	ice_for_each_vsi(pf, i) {
7327		if (!pf->vsi[i])
7328			continue;
7329
7330		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7331			continue;
7332
7333		err = ice_vsi_release(pf->vsi[i]);
7334		if (err)
7335			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7336				i, err, pf->vsi[i]->vsi_num);
7337	}
7338}
7339
7340/**
7341 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7342 * @pf: pointer to the PF instance
7343 * @type: VSI type to rebuild
7344 *
7345 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7346 */
7347static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7348{
7349	struct device *dev = ice_pf_to_dev(pf);
7350	int i, err;
7351
7352	ice_for_each_vsi(pf, i) {
7353		struct ice_vsi *vsi = pf->vsi[i];
7354
7355		if (!vsi || vsi->type != type)
7356			continue;
7357
7358		/* rebuild the VSI */
7359		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7360		if (err) {
7361			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7362				err, vsi->idx, ice_vsi_type_str(type));
7363			return err;
7364		}
7365
7366		/* replay filters for the VSI */
7367		err = ice_replay_vsi(&pf->hw, vsi->idx);
7368		if (err) {
7369			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7370				err, vsi->idx, ice_vsi_type_str(type));
7371			return err;
7372		}
7373
7374		/* Re-map HW VSI number, using VSI handle that has been
7375		 * previously validated in ice_replay_vsi() call above
7376		 */
7377		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7378
7379		/* enable the VSI */
7380		err = ice_ena_vsi(vsi, false);
7381		if (err) {
7382			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7383				err, vsi->idx, ice_vsi_type_str(type));
7384			return err;
7385		}
7386
7387		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7388			 ice_vsi_type_str(type));
7389	}
7390
7391	return 0;
7392}
7393
7394/**
7395 * ice_update_pf_netdev_link - Update PF netdev link status
7396 * @pf: pointer to the PF instance
7397 */
7398static void ice_update_pf_netdev_link(struct ice_pf *pf)
7399{
7400	bool link_up;
7401	int i;
7402
7403	ice_for_each_vsi(pf, i) {
7404		struct ice_vsi *vsi = pf->vsi[i];
7405
7406		if (!vsi || vsi->type != ICE_VSI_PF)
7407			return;
7408
7409		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7410		if (link_up) {
7411			netif_carrier_on(pf->vsi[i]->netdev);
7412			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7413		} else {
7414			netif_carrier_off(pf->vsi[i]->netdev);
7415			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7416		}
7417	}
7418}
7419
7420/**
7421 * ice_rebuild - rebuild after reset
7422 * @pf: PF to rebuild
7423 * @reset_type: type of reset
7424 *
7425 * Do not rebuild VF VSIs in this flow because that is already handled via
7426 * ice_reset_all_vfs(). The requirements for resetting a VF after a
7427 * PFR/CORER/GLOBR/etc. are different from the normal flow, and we don't want
7428 * to reset/rebuild all the VF VSIs twice.
7429 */
7430static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7431{
7432	struct device *dev = ice_pf_to_dev(pf);
7433	struct ice_hw *hw = &pf->hw;
7434	bool dvm;
7435	int err;
7436
7437	if (test_bit(ICE_DOWN, pf->state))
7438		goto clear_recovery;
7439
7440	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7441
7442#define ICE_EMP_RESET_SLEEP_MS 5000
7443	if (reset_type == ICE_RESET_EMPR) {
7444		/* If an EMP reset has occurred, any previously pending flash
7445		 * update will have completed. We no longer know whether or
7446		 * not the NVM update EMP reset is restricted.
7447		 */
7448		pf->fw_emp_reset_disabled = false;
7449
7450		msleep(ICE_EMP_RESET_SLEEP_MS);
7451	}
7452
7453	err = ice_init_all_ctrlq(hw);
7454	if (err) {
7455		dev_err(dev, "control queues init failed %d\n", err);
7456		goto err_init_ctrlq;
7457	}
7458
7459	/* if DDP was previously loaded successfully */
7460	if (!ice_is_safe_mode(pf)) {
7461		/* reload the SW DB of filter tables */
7462		if (reset_type == ICE_RESET_PFR)
7463			ice_fill_blk_tbls(hw);
7464		else
7465			/* Reload DDP Package after CORER/GLOBR reset */
7466			ice_load_pkg(NULL, pf);
7467	}
7468
7469	err = ice_clear_pf_cfg(hw);
7470	if (err) {
7471		dev_err(dev, "clear PF configuration failed %d\n", err);
7472		goto err_init_ctrlq;
7473	}
7474
7475	ice_clear_pxe_mode(hw);
7476
7477	err = ice_init_nvm(hw);
7478	if (err) {
7479		dev_err(dev, "ice_init_nvm failed %d\n", err);
7480		goto err_init_ctrlq;
7481	}
7482
7483	err = ice_get_caps(hw);
7484	if (err) {
7485		dev_err(dev, "ice_get_caps failed %d\n", err);
7486		goto err_init_ctrlq;
7487	}
7488
7489	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7490	if (err) {
7491		dev_err(dev, "set_mac_cfg failed %d\n", err);
7492		goto err_init_ctrlq;
7493	}
7494
7495	dvm = ice_is_dvm_ena(hw);
7496
7497	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7498	if (err)
7499		goto err_init_ctrlq;
7500
7501	err = ice_sched_init_port(hw->port_info);
7502	if (err)
7503		goto err_sched_init_port;
7504
7505	/* start misc vector */
7506	err = ice_req_irq_msix_misc(pf);
7507	if (err) {
7508		dev_err(dev, "misc vector setup failed: %d\n", err);
7509		goto err_sched_init_port;
7510	}
7511
7512	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7513		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7514		if (!rd32(hw, PFQF_FD_SIZE)) {
7515			u16 unused, guar, b_effort;
7516
7517			guar = hw->func_caps.fd_fltr_guar;
7518			b_effort = hw->func_caps.fd_fltr_best_effort;
7519
7520			/* force guaranteed filter pool for PF */
7521			ice_alloc_fd_guar_item(hw, &unused, guar);
7522			/* force shared filter pool for PF */
7523			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7524		}
7525	}
7526
7527	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7528		ice_dcb_rebuild(pf);
7529
7530	/* If the PF previously had PTP enabled, PTP init needs to happen
7531	 * before the VSI rebuild; otherwise the PTP link status events
7532	 * fail.
7533	 */
7534	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7535		ice_ptp_rebuild(pf, reset_type);
7536
7537	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7538		ice_gnss_init(pf);
7539
7540	/* rebuild PF VSI */
7541	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7542	if (err) {
7543		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7544		goto err_vsi_rebuild;
7545	}
7546
7547	err = ice_eswitch_rebuild(pf);
7548	if (err) {
7549		dev_err(dev, "Switchdev rebuild failed: %d\n", err);
7550		goto err_vsi_rebuild;
7551	}
7552
7553	if (reset_type == ICE_RESET_PFR) {
7554		err = ice_rebuild_channels(pf);
7555		if (err) {
7556			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7557				err);
7558			goto err_vsi_rebuild;
7559		}
7560	}
7561
7562	/* If Flow Director is active */
7563	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7564		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7565		if (err) {
7566			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7567			goto err_vsi_rebuild;
7568		}
7569
7570		/* replay HW Flow Director recipes */
7571		if (hw->fdir_prof)
7572			ice_fdir_replay_flows(hw);
7573
7574		/* replay Flow Director filters */
7575		ice_fdir_replay_fltrs(pf);
7576
7577		ice_rebuild_arfs(pf);
7578	}
7579
7580	ice_update_pf_netdev_link(pf);
7581
7582	/* tell the firmware we are up */
7583	err = ice_send_version(pf);
7584	if (err) {
7585		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7586			err);
7587		goto err_vsi_rebuild;
7588	}
7589
7590	ice_replay_post(hw);
7591
7592	/* if we get here, reset flow is successful */
7593	clear_bit(ICE_RESET_FAILED, pf->state);
7594
7595	ice_plug_aux_dev(pf);
7596	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7597		ice_lag_rebuild(pf);
7598
7599	/* Restore timestamp mode settings after VSI rebuild */
7600	ice_ptp_restore_timestamp_mode(pf);
7601	return;
7602
7603err_vsi_rebuild:
7604err_sched_init_port:
7605	ice_sched_cleanup_all(hw);
7606err_init_ctrlq:
7607	ice_shutdown_all_ctrlq(hw);
7608	set_bit(ICE_RESET_FAILED, pf->state);
7609clear_recovery:
7610	/* set this bit in PF state to control service task scheduling */
7611	set_bit(ICE_NEEDS_RESTART, pf->state);
7612	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7613}
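
The rebuild above runs from the driver's service task in response to a reset request bit. The request side is a two-step pattern, visible verbatim in ice_tx_timeout() later in this file; a sketch with a hypothetical wrapper name:

static void example_request_pf_reset(struct ice_pf *pf)
{
	/* mark the PF-level reset as requested ... */
	set_bit(ICE_PFR_REQ, pf->state);
	/* ... and kick the service task, which eventually drives ice_rebuild() */
	ice_service_task_schedule(pf);
}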
7614
7615/**
7616 * ice_change_mtu - NDO callback to change the MTU
7617 * @netdev: network interface device structure
7618 * @new_mtu: new value for maximum frame size
7619 *
7620 * Returns 0 on success, negative on failure
7621 */
7622static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7623{
7624	struct ice_netdev_priv *np = netdev_priv(netdev);
7625	struct ice_vsi *vsi = np->vsi;
7626	struct ice_pf *pf = vsi->back;
7627	struct bpf_prog *prog;
7628	u8 count = 0;
7629	int err = 0;
7630
7631	if (new_mtu == (int)netdev->mtu) {
7632		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7633		return 0;
7634	}
7635
7636	prog = vsi->xdp_prog;
7637	if (prog && !prog->aux->xdp_has_frags) {
7638		int frame_size = ice_max_xdp_frame_size(vsi);
7639
7640		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7641			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7642				   frame_size - ICE_ETH_PKT_HDR_PAD);
7643			return -EINVAL;
7644		}
7645	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7646		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7647			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7648				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7649			return -EINVAL;
7650		}
7651	}
7652
7653	/* if a reset is in progress, wait for some time for it to complete */
7654	do {
7655		if (ice_is_reset_in_progress(pf->state)) {
7656			count++;
7657			usleep_range(1000, 2000);
7658		} else {
7659			break;
7660		}
7661
7662	} while (count < 100);
7663
7664	if (count == 100) {
7665		netdev_err(netdev, "can't change MTU. Device is busy\n");
7666		return -EBUSY;
7667	}
7668
7669	netdev->mtu = (unsigned int)new_mtu;
7670	err = ice_down_up(vsi);
7671	if (err)
7672		return err;
7673
7674	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7675	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7676
7677	return err;
7678}
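
The XDP clamp in ice_change_mtu() is pure arithmetic: the new MTU plus the L2 overhead constant must fit in the frame a single-buffer XDP program can see. Restated as a standalone predicate (the helper name is ours):

static bool example_mtu_ok_for_xdp(struct ice_vsi *vsi, int new_mtu)
{
	int frame_size = ice_max_xdp_frame_size(vsi);

	/* mirrors the check above: MTU + header pad must fit the XDP frame */
	return new_mtu + ICE_ETH_PKT_HDR_PAD <= frame_size;
}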
7679
7680/**
7681 * ice_eth_ioctl - Access the hwtstamp interface
7682 * @netdev: network interface device structure
7683 * @ifr: interface request data
7684 * @cmd: ioctl command
7685 */
7686static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7687{
7688	struct ice_netdev_priv *np = netdev_priv(netdev);
7689	struct ice_pf *pf = np->vsi->back;
7690
7691	switch (cmd) {
7692	case SIOCGHWTSTAMP:
7693		return ice_ptp_get_ts_config(pf, ifr);
7694	case SIOCSHWTSTAMP:
7695		return ice_ptp_set_ts_config(pf, ifr);
7696	default:
7697		return -EOPNOTSUPP;
7698	}
7699}
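
ice_eth_ioctl() is the kernel end of the standard hwtstamp UAPI; userspace reaches it through SIOC[GS]HWTSTAMP on any socket. A minimal sketch of the userspace side ("eth0" is a placeholder):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_enable_hw_timestamps(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,		/* timestamp Tx packets */
		.rx_filter = HWTSTAMP_FILTER_ALL,	/* and all Rx packets */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* lands in ice_eth_ioctl() */
}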
7700
7701/**
7702 * ice_aq_str - convert AQ err code to a string
7703 * @aq_err: the AQ error code to convert
7704 */
7705const char *ice_aq_str(enum ice_aq_err aq_err)
7706{
7707	switch (aq_err) {
7708	case ICE_AQ_RC_OK:
7709		return "OK";
7710	case ICE_AQ_RC_EPERM:
7711		return "ICE_AQ_RC_EPERM";
7712	case ICE_AQ_RC_ENOENT:
7713		return "ICE_AQ_RC_ENOENT";
7714	case ICE_AQ_RC_ENOMEM:
7715		return "ICE_AQ_RC_ENOMEM";
7716	case ICE_AQ_RC_EBUSY:
7717		return "ICE_AQ_RC_EBUSY";
7718	case ICE_AQ_RC_EEXIST:
7719		return "ICE_AQ_RC_EEXIST";
7720	case ICE_AQ_RC_EINVAL:
7721		return "ICE_AQ_RC_EINVAL";
7722	case ICE_AQ_RC_ENOSPC:
7723		return "ICE_AQ_RC_ENOSPC";
7724	case ICE_AQ_RC_ENOSYS:
7725		return "ICE_AQ_RC_ENOSYS";
7726	case ICE_AQ_RC_EMODE:
7727		return "ICE_AQ_RC_EMODE";
7728	case ICE_AQ_RC_ENOSEC:
7729		return "ICE_AQ_RC_ENOSEC";
7730	case ICE_AQ_RC_EBADSIG:
7731		return "ICE_AQ_RC_EBADSIG";
7732	case ICE_AQ_RC_ESVN:
7733		return "ICE_AQ_RC_ESVN";
7734	case ICE_AQ_RC_EBADMAN:
7735		return "ICE_AQ_RC_EBADMAN";
7736	case ICE_AQ_RC_EBADBUF:
7737		return "ICE_AQ_RC_EBADBUF";
7738	}
7739
7740	return "ICE_AQ_RC_UNKNOWN";
7741}
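
ice_aq_str() exists so log messages can pair the kernel errno with the firmware's own admin-queue status; the dev_err calls below all follow the same shape. A sketch of the idiom (the wrapper name is ours):

static void example_log_aq_failure(struct device *dev, struct ice_hw *hw,
				   int status)
{
	/* sq_last_status holds the AQ return code of the last command */
	dev_err(dev, "AQ command failed, err %d aq_err %s\n",
		status, ice_aq_str(hw->adminq.sq_last_status));
}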
7742
7743/**
7744 * ice_set_rss_lut - Set RSS LUT
7745 * @vsi: Pointer to VSI structure
7746 * @lut: Lookup table
7747 * @lut_size: Lookup table size
7748 *
7749 * Returns 0 on success, negative on failure
7750 */
7751int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7752{
7753	struct ice_aq_get_set_rss_lut_params params = {};
7754	struct ice_hw *hw = &vsi->back->hw;
7755	int status;
7756
7757	if (!lut)
7758		return -EINVAL;
7759
7760	params.vsi_handle = vsi->idx;
7761	params.lut_size = lut_size;
7762	params.lut_type = vsi->rss_lut_type;
7763	params.lut = lut;
7764
7765	status = ice_aq_set_rss_lut(hw, &params);
7766	if (status)
7767		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7768			status, ice_aq_str(hw->adminq.sq_last_status));
7769
7770	return status;
7771}
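
A typical caller builds the table before handing it to ice_set_rss_lut(); a round-robin spread over the active Rx queues is the common default. A sketch, assuming vsi->rss_table_size and vsi->rss_size describe the LUT and queue counts as elsewhere in the driver:

static int example_set_round_robin_lut(struct ice_vsi *vsi)
{
	u16 lut_size = vsi->rss_table_size;
	int err, i;
	u8 *lut;

	lut = kcalloc(lut_size, sizeof(*lut), GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* spread LUT entries evenly across the VSI's RSS queue set */
	for (i = 0; i < lut_size; i++)
		lut[i] = i % vsi->rss_size;

	err = ice_set_rss_lut(vsi, lut, lut_size);
	kfree(lut);
	return err;
}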
7772
7773/**
7774 * ice_set_rss_key - Set RSS key
7775 * @vsi: Pointer to the VSI structure
7776 * @seed: RSS hash seed
7777 *
7778 * Returns 0 on success, negative on failure
7779 */
7780int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7781{
7782	struct ice_hw *hw = &vsi->back->hw;
7783	int status;
7784
7785	if (!seed)
7786		return -EINVAL;
7787
7788	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7789	if (status)
7790		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7791			status, ice_aq_str(hw->adminq.sq_last_status));
7792
7793	return status;
7794}
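
The seed buffer must match the driver's expected key length; a common pattern is to seed with the kernel's boot-time random RSS key. A sketch, assuming ICE_VSIQF_HKEY_ARRAY_SIZE is the right buffer size, as in the driver's own RSS configuration paths:

static int example_set_random_rss_key(struct ice_vsi *vsi)
{
	u8 seed[ICE_VSIQF_HKEY_ARRAY_SIZE];

	netdev_rss_key_fill(seed, sizeof(seed));	/* kernel's random key */
	return ice_set_rss_key(vsi, seed);
}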
7795
7796/**
7797 * ice_get_rss_lut - Get RSS LUT
7798 * @vsi: Pointer to VSI structure
7799 * @lut: Buffer to store the lookup table entries
7800 * @lut_size: Size of buffer to store the lookup table entries
7801 *
7802 * Returns 0 on success, negative on failure
7803 */
7804int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7805{
7806	struct ice_aq_get_set_rss_lut_params params = {};
7807	struct ice_hw *hw = &vsi->back->hw;
7808	int status;
7809
7810	if (!lut)
7811		return -EINVAL;
7812
7813	params.vsi_handle = vsi->idx;
7814	params.lut_size = lut_size;
7815	params.lut_type = vsi->rss_lut_type;
7816	params.lut = lut;
7817
7818	status = ice_aq_get_rss_lut(hw, &params);
7819	if (status)
7820		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7821			status, ice_aq_str(hw->adminq.sq_last_status));
7822
7823	return status;
7824}
7825
7826/**
7827 * ice_get_rss_key - Get RSS key
7828 * @vsi: Pointer to VSI structure
7829 * @seed: Buffer to store the key in
7830 *
7831 * Returns 0 on success, negative on failure
7832 */
7833int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7834{
7835	struct ice_hw *hw = &vsi->back->hw;
7836	int status;
7837
7838	if (!seed)
7839		return -EINVAL;
7840
7841	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7842	if (status)
7843		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7844			status, ice_aq_str(hw->adminq.sq_last_status));
7845
7846	return status;
7847}
7848
7849/**
7850 * ice_set_rss_hfunc - Set RSS HASH function
7851 * @vsi: Pointer to VSI structure
7852 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
7853 *
7854 * Returns 0 on success, negative on failure
7855 */
7856int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
7857{
7858	struct ice_hw *hw = &vsi->back->hw;
7859	struct ice_vsi_ctx *ctx;
7860	bool symm;
7861	int err;
7862
7863	if (hfunc == vsi->rss_hfunc)
7864		return 0;
7865
7866	if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
7867	    hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
7868		return -EOPNOTSUPP;
7869
7870	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7871	if (!ctx)
7872		return -ENOMEM;
7873
7874	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
7875	ctx->info.q_opt_rss = vsi->info.q_opt_rss;
7876	ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
7877	ctx->info.q_opt_rss |=
7878		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
7879	ctx->info.q_opt_tc = vsi->info.q_opt_tc;
7880	ctx->info.q_opt_flags = vsi->info.q_opt_rss;
7881
7882	err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
7883	if (err) {
7884		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
7885			vsi->vsi_num, err);
7886	} else {
7887		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
7888		vsi->rss_hfunc = hfunc;
7889		netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
7890			    hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
7891			    "Symmetric " : "");
7892	}
7893	kfree(ctx);
7894	if (err)
7895		return err;
7896
7897	/* Fix the symmetry setting for all existing RSS configurations */
7898	symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
7899	return ice_set_rss_cfg_symm(hw, vsi, symm);
7900}
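
Only the two Toeplitz variants are accepted above, so flipping a VSI to symmetric hashing (both directions of a flow land on the same queue) is a single call; a sketch:

static int example_enable_symmetric_rss(struct ice_vsi *vsi)
{
	/* symmetric Toeplitz hashes A->B and B->A of a flow identically */
	return ice_set_rss_hfunc(vsi, ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
}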
7901
7902/**
7903 * ice_bridge_getlink - Get the hardware bridge mode
7904 * @skb: skb buff
7905 * @pid: process ID
7906 * @seq: RTNL message seq
7907 * @dev: the netdev being configured
7908 * @filter_mask: filter mask passed in
7909 * @nlflags: netlink flags passed in
7910 *
7911 * Return the bridge mode (VEB/VEPA)
7912 */
7913static int
7914ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7915		   struct net_device *dev, u32 filter_mask, int nlflags)
7916{
7917	struct ice_netdev_priv *np = netdev_priv(dev);
7918	struct ice_vsi *vsi = np->vsi;
7919	struct ice_pf *pf = vsi->back;
7920	u16 bmode;
7921
7922	bmode = pf->first_sw->bridge_mode;
7923
7924	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7925				       filter_mask, NULL);
7926}
7927
7928/**
7929 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7930 * @vsi: Pointer to VSI structure
7931 * @bmode: Hardware bridge mode (VEB/VEPA)
7932 *
7933 * Returns 0 on success, negative on failure
7934 */
7935static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7936{
7937	struct ice_aqc_vsi_props *vsi_props;
7938	struct ice_hw *hw = &vsi->back->hw;
7939	struct ice_vsi_ctx *ctxt;
7940	int ret;
7941
7942	vsi_props = &vsi->info;
7943
7944	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7945	if (!ctxt)
7946		return -ENOMEM;
7947
7948	ctxt->info = vsi->info;
7949
7950	if (bmode == BRIDGE_MODE_VEB)
7951		/* change from VEPA to VEB mode */
7952		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7953	else
7954		/* change from VEB to VEPA mode */
7955		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7956	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7957
7958	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7959	if (ret) {
7960		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7961			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7962		goto out;
7963	}
7964	/* Update sw flags for book keeping */
7965	vsi_props->sw_flags = ctxt->info.sw_flags;
7966
7967out:
7968	kfree(ctxt);
7969	return ret;
7970}
7971
7972/**
7973 * ice_bridge_setlink - Set the hardware bridge mode
7974 * @dev: the netdev being configured
7975 * @nlh: RTNL message
7976 * @flags: bridge setlink flags
7977 * @extack: netlink extended ack
7978 *
7979 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7980 * hooked up to. Iterates through the PF VSI list and sets the loopback mode
7981 * (if not already set) for all VSIs connected to this switch, and also updates
7982 * the unicast switch filter rules for the corresponding switch of the netdev.
7983 */
7984static int
7985ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7986		   u16 __always_unused flags,
7987		   struct netlink_ext_ack __always_unused *extack)
7988{
7989	struct ice_netdev_priv *np = netdev_priv(dev);
7990	struct ice_pf *pf = np->vsi->back;
7991	struct nlattr *attr, *br_spec;
7992	struct ice_hw *hw = &pf->hw;
7993	struct ice_sw *pf_sw;
7994	int rem, v, err = 0;
7995
7996	pf_sw = pf->first_sw;
7997	/* find the attribute in the netlink message */
7998	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7999	if (!br_spec)
8000		return -EINVAL;
8001
8002	nla_for_each_nested(attr, br_spec, rem) {
8003		__u16 mode;
8004
8005		if (nla_type(attr) != IFLA_BRIDGE_MODE)
8006			continue;
8007		mode = nla_get_u16(attr);
8008		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8009			return -EINVAL;
8010		/* Continue if bridge mode is not being flipped */
8011		if (mode == pf_sw->bridge_mode)
8012			continue;
8013		/* Iterates through the PF VSI list and update the loopback
8014		 * mode of the VSI
8015		 */
8016		ice_for_each_vsi(pf, v) {
8017			if (!pf->vsi[v])
8018				continue;
8019			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8020			if (err)
8021				return err;
8022		}
8023
8024		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8025		/* Update the unicast switch filter rules for the corresponding
8026		 * switch of the netdev
8027		 */
8028		err = ice_update_sw_rule_bridge_mode(hw);
8029		if (err) {
8030			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8031				   mode, err,
8032				   ice_aq_str(hw->adminq.sq_last_status));
8033			/* revert hw->evb_veb */
8034			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8035			return err;
8036		}
8037
8038		pf_sw->bridge_mode = mode;
8039	}
8040
8041	return 0;
8042}
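
For reference, these NDOs are exercised from userspace via iproute2; an illustrative invocation (the device name is a placeholder, and option support depends on the iproute2 version):

	bridge link set dev eth0 hwmode veb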
8043
8044/**
8045 * ice_tx_timeout - Respond to a Tx Hang
8046 * @netdev: network interface device structure
8047 * @txqueue: Tx queue
8048 */
8049static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8050{
8051	struct ice_netdev_priv *np = netdev_priv(netdev);
8052	struct ice_tx_ring *tx_ring = NULL;
8053	struct ice_vsi *vsi = np->vsi;
8054	struct ice_pf *pf = vsi->back;
8055	u32 i;
8056
8057	pf->tx_timeout_count++;
8058
8059	/* Check if PFC is enabled for the TC to which the queue belongs.
8060	 * If so, the Tx timeout is not caused by a hung queue and there is
8061	 * no need to reset and rebuild.
8062	 */
8063	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8064		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8065			 txqueue);
8066		return;
8067	}
8068
8069	/* now that we have an index, find the tx_ring struct */
8070	ice_for_each_txq(vsi, i)
8071		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8072			if (txqueue == vsi->tx_rings[i]->q_index) {
8073				tx_ring = vsi->tx_rings[i];
8074				break;
8075			}
8076
8077	/* Reset recovery level if enough time has elapsed after last timeout.
8078	 * Also ensure no new reset action happens before next timeout period.
8079	 */
8080	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8081		pf->tx_timeout_recovery_level = 1;
8082	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8083				       netdev->watchdog_timeo)))
8084		return;
8085
8086	if (tx_ring) {
8087		struct ice_hw *hw = &pf->hw;
8088		u32 head, val = 0;
8089
8090		head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8091				 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8092		/* Read interrupt register */
8093		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8094
8095		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8096			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8097			    head, tx_ring->next_to_use, val);
8098	}
8099
8100	pf->tx_timeout_last_recovery = jiffies;
8101	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8102		    pf->tx_timeout_recovery_level, txqueue);
8103
8104	switch (pf->tx_timeout_recovery_level) {
8105	case 1:
8106		set_bit(ICE_PFR_REQ, pf->state);
8107		break;
8108	case 2:
8109		set_bit(ICE_CORER_REQ, pf->state);
8110		break;
8111	case 3:
8112		set_bit(ICE_GLOBR_REQ, pf->state);
8113		break;
8114	default:
8115		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8116		set_bit(ICE_DOWN, pf->state);
8117		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8118		set_bit(ICE_SERVICE_DIS, pf->state);
8119		break;
8120	}
8121
8122	ice_service_task_schedule(pf);
8123	pf->tx_timeout_recovery_level++;
8124}
8125
8126/**
8127 * ice_setup_tc_cls_flower - flower classifier offloads
8128 * @np: net device to configure
8129 * @filter_dev: device on which filter is added
8130 * @cls_flower: offload data
8131 */
8132static int
8133ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8134			struct net_device *filter_dev,
8135			struct flow_cls_offload *cls_flower)
8136{
8137	struct ice_vsi *vsi = np->vsi;
8138
8139	if (cls_flower->common.chain_index)
8140		return -EOPNOTSUPP;
8141
8142	switch (cls_flower->command) {
8143	case FLOW_CLS_REPLACE:
8144		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8145	case FLOW_CLS_DESTROY:
8146		return ice_del_cls_flower(vsi, cls_flower);
8147	default:
8148		return -EINVAL;
8149	}
8150}
8151
8152/**
8153 * ice_setup_tc_block_cb - callback handler registered for TC block
8154 * @type: TC SETUP type
8155 * @type_data: TC flower offload data that contains user input
8156 * @cb_priv: netdev private data
8157 */
8158static int
8159ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
8160{
8161	struct ice_netdev_priv *np = cb_priv;
8162
8163	switch (type) {
8164	case TC_SETUP_CLSFLOWER:
8165		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8166					       type_data);
8167	default:
8168		return -EOPNOTSUPP;
8169	}
8170}
8171
8172/**
8173 * ice_validate_mqprio_qopt - Validate TCF input parameters
8174 * @vsi: Pointer to VSI
8175 * @mqprio_qopt: input parameters for mqprio queue configuration
8176 *
8177 * This function validates MQPRIO params, such as qcount (which must be a
8178 * power of 2 wherever needed), and makes sure the user doesn't specify a
8179 * qcount or BW rate limit for more TCs than "num_tc"
8180 */
8181static int
8182ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8183			 struct tc_mqprio_qopt_offload *mqprio_qopt)
8184{
8185	int non_power_of_2_qcount = 0;
8186	struct ice_pf *pf = vsi->back;
8187	int max_rss_q_cnt = 0;
8188	u64 sum_min_rate = 0;
8189	struct device *dev;
8190	int i, speed;
8191	u8 num_tc;
8192
8193	if (vsi->type != ICE_VSI_PF)
8194		return -EINVAL;
8195
8196	if (mqprio_qopt->qopt.offset[0] != 0 ||
8197	    mqprio_qopt->qopt.num_tc < 1 ||
8198	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8199		return -EINVAL;
8200
8201	dev = ice_pf_to_dev(pf);
8202	vsi->ch_rss_size = 0;
8203	num_tc = mqprio_qopt->qopt.num_tc;
8204	speed = ice_get_link_speed_kbps(vsi);
8205
8206	for (i = 0; num_tc; i++) {
8207		int qcount = mqprio_qopt->qopt.count[i];
8208		u64 max_rate, min_rate, rem;
8209
8210		if (!qcount)
8211			return -EINVAL;
8212
8213		if (is_power_of_2(qcount)) {
8214			if (non_power_of_2_qcount &&
8215			    qcount > non_power_of_2_qcount) {
8216				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8217					qcount, non_power_of_2_qcount);
8218				return -EINVAL;
8219			}
8220			if (qcount > max_rss_q_cnt)
8221				max_rss_q_cnt = qcount;
8222		} else {
8223			if (non_power_of_2_qcount &&
8224			    qcount != non_power_of_2_qcount) {
8225				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8226					qcount, non_power_of_2_qcount);
8227				return -EINVAL;
8228			}
8229			if (qcount < max_rss_q_cnt) {
8230				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8231					qcount, max_rss_q_cnt);
8232				return -EINVAL;
8233			}
8234			max_rss_q_cnt = qcount;
8235			non_power_of_2_qcount = qcount;
8236		}
8237
8238		/* The tc command takes input in K/M/Gbps or K/M/Gbit etc. but
8239		 * converts the bandwidth rate limit into Bytes/s when
8240		 * passing it down to the driver, so convert the input bandwidth
8241		 * from Bytes/s back to Kbps.
8242		 */
8243		max_rate = mqprio_qopt->max_rate[i];
8244		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8245
8246		/* min_rate is minimum guaranteed rate and it can't be zero */
8247		min_rate = mqprio_qopt->min_rate[i];
8248		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8249		sum_min_rate += min_rate;
8250
8251		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8252			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8253				min_rate, ICE_MIN_BW_LIMIT);
8254			return -EINVAL;
8255		}
8256
8257		if (max_rate && max_rate > speed) {
8258			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8259				i, max_rate, speed);
8260			return -EINVAL;
8261		}
8262
8263		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8264		if (rem) {
8265			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8266				i, ICE_MIN_BW_LIMIT);
8267			return -EINVAL;
8268		}
8269
8270		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8271		if (rem) {
8272			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8273				i, ICE_MIN_BW_LIMIT);
8274			return -EINVAL;
8275		}
8276
8277		/* min_rate can't be more than max_rate, except when max_rate
8278		 * is zero (implies max_rate sought is max line rate). In such
8279		 * a case min_rate can be more than max.
8280		 */
8281		if (max_rate && min_rate > max_rate) {
8282			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8283				min_rate, max_rate);
8284			return -EINVAL;
8285		}
8286
8287		if (i >= mqprio_qopt->qopt.num_tc - 1)
8288			break;
8289		if (mqprio_qopt->qopt.offset[i + 1] !=
8290		    (mqprio_qopt->qopt.offset[i] + qcount))
8291			return -EINVAL;
8292	}
8293	if (vsi->num_rxq <
8294	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8295		return -EINVAL;
8296	if (vsi->num_txq <
8297	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8298		return -EINVAL;
8299
8300	if (sum_min_rate && sum_min_rate > (u64)speed) {
8301		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8302			sum_min_rate, speed);
8303		return -EINVAL;
8304	}
8305
8306	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8307	vsi->ch_rss_size = max_rss_q_cnt;
8308
8309	return 0;
8310}
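
The rate handling above leans on one conversion: tc delivers rates in Bytes/s, while the scheduler wants Kbps, i.e. kbit/s = B/s * 8 / 1000. A worked sketch, assuming ICE_BW_KBPS_DIVISOR encodes that combined divide-by-125:

static u64 example_bytes_per_sec_to_kbps(u64 rate)
{
	/* e.g. tc passes 125000000 B/s for 1 Gbit/s; / 125 = 1000000 Kbps */
	return div_u64(rate, ICE_BW_KBPS_DIVISOR);
}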
8311
8312/**
8313 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8314 * @pf: ptr to PF device
8315 * @vsi: ptr to VSI
8316 */
8317static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8318{
8319	struct device *dev = ice_pf_to_dev(pf);
8320	bool added = false;
8321	struct ice_hw *hw;
8322	int flow;
8323
8324	if (!(vsi->num_gfltr || vsi->num_bfltr))
8325		return -EINVAL;
8326
8327	hw = &pf->hw;
8328	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8329		struct ice_fd_hw_prof *prof;
8330		int tun, status;
8331		u64 entry_h;
8332
8333		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8334		      hw->fdir_prof[flow]->cnt))
8335			continue;
8336
8337		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8338			enum ice_flow_priority prio;
8339
8340			/* add this VSI to FDir profile for this flow */
8341			prio = ICE_FLOW_PRIO_NORMAL;
8342			prof = hw->fdir_prof[flow];
8343			status = ice_flow_add_entry(hw, ICE_BLK_FD,
8344						    prof->prof_id[tun],
8345						    prof->vsi_h[0], vsi->idx,
8346						    prio, prof->fdir_seg[tun],
8347						    &entry_h);
8348			if (status) {
8349				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8350					vsi->idx, flow);
8351				continue;
8352			}
8353
8354			prof->entry_h[prof->cnt][tun] = entry_h;
8355		}
8356
8357		/* store VSI for filter replay and delete */
8358		prof->vsi_h[prof->cnt] = vsi->idx;
8359		prof->cnt++;
8360
8361		added = true;
8362		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8363			flow);
8364	}
8365
8366	if (!added)
8367		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8368
8369	return 0;
8370}
8371
8372/**
8373 * ice_add_channel - add a channel by adding VSI
8374 * @pf: ptr to PF device
8375 * @sw_id: underlying HW switching element ID
8376 * @ch: ptr to channel structure
8377 *
8378 * Add a channel (VSI) using add_vsi and queue_map
8379 */
8380static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8381{
8382	struct device *dev = ice_pf_to_dev(pf);
8383	struct ice_vsi *vsi;
8384
8385	if (ch->type != ICE_VSI_CHNL) {
8386		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8387		return -EINVAL;
8388	}
8389
8390	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8391	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8392		dev_err(dev, "create chnl VSI failure\n");
8393		return -EINVAL;
8394	}
8395
8396	ice_add_vsi_to_fdir(pf, vsi);
8397
8398	ch->sw_id = sw_id;
8399	ch->vsi_num = vsi->vsi_num;
8400	ch->info.mapping_flags = vsi->info.mapping_flags;
8401	ch->ch_vsi = vsi;
8402	/* set the back pointer of channel for newly created VSI */
8403	vsi->ch = ch;
8404
8405	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8406	       sizeof(vsi->info.q_mapping));
8407	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8408	       sizeof(vsi->info.tc_mapping));
8409
8410	return 0;
8411}
8412
8413/**
8414 * ice_chnl_cfg_res
8415 * @vsi: the VSI being setup
8416 * @ch: ptr to channel structure
8417 *
8418 * Configure channel specific resources such as rings, vector.
8419 */
8420static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8421{
8422	int i;
8423
8424	for (i = 0; i < ch->num_txq; i++) {
8425		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8426		struct ice_ring_container *rc;
8427		struct ice_tx_ring *tx_ring;
8428		struct ice_rx_ring *rx_ring;
8429
8430		tx_ring = vsi->tx_rings[ch->base_q + i];
8431		rx_ring = vsi->rx_rings[ch->base_q + i];
8432		if (!tx_ring || !rx_ring)
8433			continue;
8434
8435		/* setup ring being channel enabled */
8436		tx_ring->ch = ch;
8437		rx_ring->ch = ch;
8438
8439		/* following code block sets up vector specific attributes */
8440		tx_q_vector = tx_ring->q_vector;
8441		rx_q_vector = rx_ring->q_vector;
8442		if (!tx_q_vector && !rx_q_vector)
8443			continue;
8444
8445		if (tx_q_vector) {
8446			tx_q_vector->ch = ch;
8447			/* setup Tx and Rx ITR setting if DIM is off */
8448			rc = &tx_q_vector->tx;
8449			if (!ITR_IS_DYNAMIC(rc))
8450				ice_write_itr(rc, rc->itr_setting);
8451		}
8452		if (rx_q_vector) {
8453			rx_q_vector->ch = ch;
8454			/* setup Tx and Rx ITR setting if DIM is off */
8455			rc = &rx_q_vector->rx;
8456			if (!ITR_IS_DYNAMIC(rc))
8457				ice_write_itr(rc, rc->itr_setting);
8458		}
8459	}
8460
8461	/* It is safe to assume that, if the channel has a non-zero num_txq or
8462	 * num_rxq, the GLINT_ITR register has been written to perform an
8463	 * in-context update, hence perform a flush.
8464	 */
8465	if (ch->num_txq || ch->num_rxq)
8466		ice_flush(&vsi->back->hw);
8467}
8468
8469/**
8470 * ice_cfg_chnl_all_res - configure channel resources
8471 * @vsi: ptr to the main VSI
8472 * @ch: ptr to channel structure
8473 *
8474 * This function configures channel specific resources such as flow-director
8475 * counter index, and other resources such as queues, vectors, ITR settings
8476 */
8477static void
8478ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8479{
8480	/* configure channel (aka ADQ) resources such as queues, vectors,
8481	 * ITR settings for channel specific vectors and anything else
8482	 */
8483	ice_chnl_cfg_res(vsi, ch);
8484}
8485
8486/**
8487 * ice_setup_hw_channel - setup new channel
8488 * @pf: ptr to PF device
8489 * @vsi: the VSI being setup
8490 * @ch: ptr to channel structure
8491 * @sw_id: underlying HW switching element ID
8492 * @type: type of channel to be created (VMDq2/VF)
8493 *
8494 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8495 * and configures Tx rings accordingly
8496 */
8497static int
8498ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8499		     struct ice_channel *ch, u16 sw_id, u8 type)
8500{
8501	struct device *dev = ice_pf_to_dev(pf);
8502	int ret;
8503
8504	ch->base_q = vsi->next_base_q;
8505	ch->type = type;
8506
8507	ret = ice_add_channel(pf, sw_id, ch);
8508	if (ret) {
8509		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8510		return ret;
8511	}
8512
8513	/* configure/setup ADQ specific resources */
8514	ice_cfg_chnl_all_res(vsi, ch);
8515
8516	/* make sure to update the next_base_q so that the subsequent channel's
8517	 * (aka ADQ) VSI queue map is correct
8518	 */
8519	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8520	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8521		ch->num_rxq);
8522
8523	return 0;
8524}
8525
8526/**
8527 * ice_setup_channel - setup new channel using uplink element
8528 * @pf: ptr to PF device
8529 * @vsi: the VSI being setup
8530 * @ch: ptr to channel structure
8531 *
8532 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8533 * and uplink switching element
8534 */
8535static bool
8536ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8537		  struct ice_channel *ch)
8538{
8539	struct device *dev = ice_pf_to_dev(pf);
8540	u16 sw_id;
8541	int ret;
8542
8543	if (vsi->type != ICE_VSI_PF) {
8544		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8545		return false;
8546	}
8547
8548	sw_id = pf->first_sw->sw_id;
8549
8550	/* create channel (VSI) */
8551	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8552	if (ret) {
8553		dev_err(dev, "failed to setup hw_channel\n");
8554		return false;
8555	}
8556	dev_dbg(dev, "successfully created channel()\n");
8557
8558	return ch->ch_vsi ? true : false;
8559}
8560
8561/**
8562 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8563 * @vsi: VSI to be configured
8564 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8565 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8566 */
8567static int
8568ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8569{
8570	int err;
8571
8572	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8573	if (err)
8574		return err;
8575
8576	return ice_set_max_bw_limit(vsi, max_tx_rate);
8577}
8578
8579/**
8580 * ice_create_q_channel - function to create channel
8581 * @vsi: VSI to be configured
8582 * @ch: ptr to channel (it contains channel specific params)
8583 *
8584 * This function creates channel (VSI) using num_queues specified by user,
8585 * reconfigs RSS if needed.
8586 */
8587static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8588{
8589	struct ice_pf *pf = vsi->back;
8590	struct device *dev;
8591
8592	if (!ch)
8593		return -EINVAL;
8594
8595	dev = ice_pf_to_dev(pf);
8596	if (!ch->num_txq || !ch->num_rxq) {
8597		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8598		return -EINVAL;
8599	}
8600
8601	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8602		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8603			vsi->cnt_q_avail, ch->num_txq);
8604		return -EINVAL;
8605	}
8606
8607	if (!ice_setup_channel(pf, vsi, ch)) {
8608		dev_info(dev, "Failed to setup channel\n");
8609		return -EINVAL;
8610	}
8611	/* configure BW rate limit */
8612	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8613		int ret;
8614
8615		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8616				       ch->min_tx_rate);
8617		if (ret)
8618			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8619				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8620		else
8621			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8622				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8623	}
8624
8625	vsi->cnt_q_avail -= ch->num_txq;
8626
8627	return 0;
8628}
8629
8630/**
8631 * ice_rem_all_chnl_fltrs - removes all channel filters
8632 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8633 *
8634 * Remove all advanced switch filters only if they are channel specific
8635 * tc-flower based filter
8636 */
8637static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8638{
8639	struct ice_tc_flower_fltr *fltr;
8640	struct hlist_node *node;
8641
8642	/* to remove all channel filters, iterate an ordered list of filters */
8643	hlist_for_each_entry_safe(fltr, node,
8644				  &pf->tc_flower_fltr_list,
8645				  tc_flower_node) {
8646		struct ice_rule_query_data rule;
8647		int status;
8648
8649		/* for now process only channel specific filters */
8650		if (!ice_is_chnl_fltr(fltr))
8651			continue;
8652
8653		rule.rid = fltr->rid;
8654		rule.rule_id = fltr->rule_id;
8655		rule.vsi_handle = fltr->dest_vsi_handle;
8656		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8657		if (status) {
8658			if (status == -ENOENT)
8659				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8660					rule.rule_id);
8661			else
8662				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8663					status);
8664		} else if (fltr->dest_vsi) {
8665			/* update advanced switch filter count */
8666			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8667				u32 flags = fltr->flags;
8668
8669				fltr->dest_vsi->num_chnl_fltr--;
8670				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8671					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8672					pf->num_dmac_chnl_fltrs--;
8673			}
8674		}
8675
8676		hlist_del(&fltr->tc_flower_node);
8677		kfree(fltr);
8678	}
8679}
8680
8681/**
8682 * ice_remove_q_channels - Remove queue channels for the TCs
8683 * @vsi: VSI to be configured
8684 * @rem_fltr: delete advanced switch filter or not
8685 *
8686 * Remove queue channels for the TCs
8687 */
8688static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8689{
8690	struct ice_channel *ch, *ch_tmp;
8691	struct ice_pf *pf = vsi->back;
8692	int i;
8693
8694	/* remove all tc-flower based filters if they are channel filters only */
8695	if (rem_fltr)
8696		ice_rem_all_chnl_fltrs(pf);
8697
8698	/* remove ntuple filters since queue configuration is being changed */
8699	if (vsi->netdev->features & NETIF_F_NTUPLE) {
8700		struct ice_hw *hw = &pf->hw;
8701
8702		mutex_lock(&hw->fdir_fltr_lock);
8703		ice_fdir_del_all_fltrs(vsi);
8704		mutex_unlock(&hw->fdir_fltr_lock);
8705	}
8706
8707	/* perform cleanup for channels if they exist */
8708	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8709		struct ice_vsi *ch_vsi;
8710
8711		list_del(&ch->list);
8712		ch_vsi = ch->ch_vsi;
8713		if (!ch_vsi) {
8714			kfree(ch);
8715			continue;
8716		}
8717
8718		/* Reset queue contexts */
8719		for (i = 0; i < ch->num_rxq; i++) {
8720			struct ice_tx_ring *tx_ring;
8721			struct ice_rx_ring *rx_ring;
8722
8723			tx_ring = vsi->tx_rings[ch->base_q + i];
8724			rx_ring = vsi->rx_rings[ch->base_q + i];
8725			if (tx_ring) {
8726				tx_ring->ch = NULL;
8727				if (tx_ring->q_vector)
8728					tx_ring->q_vector->ch = NULL;
8729			}
8730			if (rx_ring) {
8731				rx_ring->ch = NULL;
8732				if (rx_ring->q_vector)
8733					rx_ring->q_vector->ch = NULL;
8734			}
8735		}
8736
8737		/* Release FD resources for the channel VSI */
8738		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8739
8740		/* clear the VSI from scheduler tree */
8741		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8742
8743		/* Delete VSI from FW, PF and HW VSI arrays */
8744		ice_vsi_delete(ch->ch_vsi);
8745
8746		/* free the channel */
8747		kfree(ch);
8748	}
8749
8750	/* clear the channel VSI map which is stored in main VSI */
8751	ice_for_each_chnl_tc(i)
8752		vsi->tc_map_vsi[i] = NULL;
8753
8754	/* reset main VSI's all TC information */
8755	vsi->all_enatc = 0;
8756	vsi->all_numtc = 0;
8757}
8758
8759/**
8760 * ice_rebuild_channels - rebuild channel
8761 * @pf: ptr to PF
8762 *
8763 * Recreate channel VSIs and replay filters
8764 */
8765static int ice_rebuild_channels(struct ice_pf *pf)
8766{
8767	struct device *dev = ice_pf_to_dev(pf);
8768	struct ice_vsi *main_vsi;
8769	bool rem_adv_fltr = true;
8770	struct ice_channel *ch;
8771	struct ice_vsi *vsi;
8772	int tc_idx = 1;
8773	int i, err;
8774
8775	main_vsi = ice_get_main_vsi(pf);
8776	if (!main_vsi)
8777		return 0;
8778
8779	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8780	    main_vsi->old_numtc == 1)
8781		return 0; /* nothing to be done */
8782
8783	/* reconfigure main VSI based on old value of TC and cached values
8784	 * for MQPRIO opts
8785	 */
8786	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8787	if (err) {
8788		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8789			main_vsi->old_ena_tc, main_vsi->vsi_num);
8790		return err;
8791	}
8792
8793	/* rebuild ADQ VSIs */
8794	ice_for_each_vsi(pf, i) {
8795		enum ice_vsi_type type;
8796
8797		vsi = pf->vsi[i];
8798		if (!vsi || vsi->type != ICE_VSI_CHNL)
8799			continue;
8800
8801		type = vsi->type;
8802
8803		/* rebuild ADQ VSI */
8804		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8805		if (err) {
8806			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8807				ice_vsi_type_str(type), vsi->idx, err);
8808			goto cleanup;
8809		}
8810
8811		/* Re-map HW VSI number, using the VSI handle that is
8812		 * validated in the ice_replay_vsi() call below
8813		 */
8814		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8815
8816		/* replay filters for the VSI */
8817		err = ice_replay_vsi(&pf->hw, vsi->idx);
8818		if (err) {
8819			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8820				ice_vsi_type_str(type), err, vsi->idx);
8821			rem_adv_fltr = false;
8822			goto cleanup;
8823		}
8824		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8825			 ice_vsi_type_str(type), vsi->idx);
8826
8827		/* store ADQ VSI at correct TC index in main VSI's
8828		 * map of TC to VSI
8829		 */
8830		main_vsi->tc_map_vsi[tc_idx++] = vsi;
8831	}
8832
8833	/* ADQ VSI(s) have been rebuilt successfully, so set up channels
8834	 * for the main VSI's Tx and Rx rings
8835	 */
8836	list_for_each_entry(ch, &main_vsi->ch_list, list) {
8837		struct ice_vsi *ch_vsi;
8838
8839		ch_vsi = ch->ch_vsi;
8840		if (!ch_vsi)
8841			continue;
8842
8843		/* reconfig channel resources */
8844		ice_cfg_chnl_all_res(main_vsi, ch);
8845
8846		/* replay BW rate limit if it is non-zero */
8847		if (!ch->max_tx_rate && !ch->min_tx_rate)
8848			continue;
8849
8850		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8851				       ch->min_tx_rate);
8852		if (err)
8853			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8854				err, ch->max_tx_rate, ch->min_tx_rate,
8855				ch_vsi->vsi_num);
8856		else
8857			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8858				ch->max_tx_rate, ch->min_tx_rate,
8859				ch_vsi->vsi_num);
8860	}
8861
8862	/* reconfig RSS for main VSI */
8863	if (main_vsi->ch_rss_size)
8864		ice_vsi_cfg_rss_lut_key(main_vsi);
8865
8866	return 0;
8867
8868cleanup:
8869	ice_remove_q_channels(main_vsi, rem_adv_fltr);
8870	return err;
8871}
8872
8873/**
8874 * ice_create_q_channels - Add queue channel for the given TCs
8875 * @vsi: VSI to be configured
8876 *
8877 * Configures queue channel mapping to the given TCs
8878 */
8879static int ice_create_q_channels(struct ice_vsi *vsi)
8880{
8881	struct ice_pf *pf = vsi->back;
8882	struct ice_channel *ch;
8883	int ret = 0, i;
8884
8885	ice_for_each_chnl_tc(i) {
8886		if (!(vsi->all_enatc & BIT(i)))
8887			continue;
8888
8889		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8890		if (!ch) {
8891			ret = -ENOMEM;
8892			goto err_free;
8893		}
8894		INIT_LIST_HEAD(&ch->list);
8895		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8896		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8897		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8898		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8899		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8900
8901		/* convert to Kbits/s */
8902		if (ch->max_tx_rate)
8903			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8904						  ICE_BW_KBPS_DIVISOR);
8905		if (ch->min_tx_rate)
8906			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8907						  ICE_BW_KBPS_DIVISOR);
8908
8909		ret = ice_create_q_channel(vsi, ch);
8910		if (ret) {
8911			dev_err(ice_pf_to_dev(pf),
8912				"failed creating channel TC:%d\n", i);
8913			kfree(ch);
8914			goto err_free;
8915		}
8916		list_add_tail(&ch->list, &vsi->ch_list);
8917		vsi->tc_map_vsi[i] = ch->ch_vsi;
8918		dev_dbg(ice_pf_to_dev(pf),
8919			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8920	}
8921	return 0;
8922
8923err_free:
8924	ice_remove_q_channels(vsi, false);
8925
8926	return ret;
8927}
8928
8929/**
8930 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8931 * @netdev: net device to configure
8932 * @type_data: TC offload data
8933 */
8934static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8935{
8936	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8937	struct ice_netdev_priv *np = netdev_priv(netdev);
8938	struct ice_vsi *vsi = np->vsi;
8939	struct ice_pf *pf = vsi->back;
8940	u16 mode, ena_tc_qdisc = 0;
8941	int cur_txq, cur_rxq;
8942	u8 hw = 0, num_tcf;
8943	struct device *dev;
8944	int ret, i;
8945
8946	dev = ice_pf_to_dev(pf);
8947	num_tcf = mqprio_qopt->qopt.num_tc;
8948	hw = mqprio_qopt->qopt.hw;
8949	mode = mqprio_qopt->mode;
8950	if (!hw) {
8951		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8952		vsi->ch_rss_size = 0;
8953		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8954		goto config_tcf;
8955	}
8956
8957	/* Generate queue region map for number of TCF requested */
8958	for (i = 0; i < num_tcf; i++)
8959		ena_tc_qdisc |= BIT(i);
8960
8961	switch (mode) {
8962	case TC_MQPRIO_MODE_CHANNEL:
8963
8964		if (pf->hw.port_info->is_custom_tx_enabled) {
8965			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8966			return -EBUSY;
8967		}
8968		ice_tear_down_devlink_rate_tree(pf);
8969
8970		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8971		if (ret) {
8972			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8973				   ret);
8974			return ret;
8975		}
8976		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8977		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8978		/* don't assume state of hw_tc_offload during driver load
8979		 * and set the flag for TC flower filter if hw_tc_offload
8980		 * already ON
8981		 */
8982		if (vsi->netdev->features & NETIF_F_HW_TC)
8983			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8984		break;
8985	default:
8986		return -EINVAL;
8987	}
8988
8989config_tcf:
8990
8991	/* Requesting same TCF configuration as already enabled */
8992	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8993	    mode != TC_MQPRIO_MODE_CHANNEL)
8994		return 0;
8995
8996	/* Pause VSI queues */
8997	ice_dis_vsi(vsi, true);
8998
8999	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9000		ice_remove_q_channels(vsi, true);
9001
9002	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9003		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9004				     num_online_cpus());
9005		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9006				     num_online_cpus());
9007	} else {
9008		/* logic to rebuild VSI, same as with ethtool -L */
9009		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9010
9011		for (i = 0; i < num_tcf; i++) {
9012			if (!(ena_tc_qdisc & BIT(i)))
9013				continue;
9014
9015			offset = vsi->mqprio_qopt.qopt.offset[i];
9016			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9017			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9018		}
9019		vsi->req_txq = offset + qcount_tx;
9020		vsi->req_rxq = offset + qcount_rx;
9021
9022		/* store away original rss_size info, so that it gets reused
9023		 * from ice_vsi_rebuild during the tc-qdisc delete stage, to
9024		 * determine what the rss_size for the main VSI should be
9025		 */
9026		vsi->orig_rss_size = vsi->rss_size;
9027	}
9028
9029	/* save current values of Tx and Rx queues before calling VSI rebuild
9030	 * for fallback option
9031	 */
9032	cur_txq = vsi->num_txq;
9033	cur_rxq = vsi->num_rxq;
9034
9035	/* proceed with rebuilding the main VSI using the correct number of queues */
9036	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9037	if (ret) {
9038		/* fallback to current number of queues */
9039		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
9040		vsi->req_txq = cur_txq;
9041		vsi->req_rxq = cur_rxq;
9042		clear_bit(ICE_RESET_FAILED, pf->state);
9043		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9044			dev_err(dev, "Rebuild of main VSI failed again\n");
9045			return ret;
9046		}
9047	}
9048
9049	vsi->all_numtc = num_tcf;
9050	vsi->all_enatc = ena_tc_qdisc;
9051	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9052	if (ret) {
9053		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9054			   vsi->vsi_num);
9055		goto exit;
9056	}
9057
9058	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9059		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9060		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9061
9062		/* set TC0 rate limit if specified */
9063		if (max_tx_rate || min_tx_rate) {
9064			/* convert to Kbits/s */
9065			if (max_tx_rate)
9066				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9067			if (min_tx_rate)
9068				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
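			/* Worked example (illustrative): mqprio hands rates
			 * down in bytes per second; assuming the divisor is
			 * 125 (i.e. 8 bits/byte over 1000), 12500000 B/s
			 * becomes 12500000 / 125 = 100000 Kbps, or 100 Mbps.
			 */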
9069
9070			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9071			if (!ret) {
9072				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9073					max_tx_rate, min_tx_rate, vsi->vsi_num);
9074			} else {
9075				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9076					max_tx_rate, min_tx_rate, vsi->vsi_num);
9077				goto exit;
9078			}
9079		}
9080		ret = ice_create_q_channels(vsi);
9081		if (ret) {
9082			netdev_err(netdev, "failed configuring queue channels\n");
9083			goto exit;
9084		} else {
9085			netdev_dbg(netdev, "successfully configured channels\n");
9086		}
9087	}
9088
9089	if (vsi->ch_rss_size)
9090		ice_vsi_cfg_rss_lut_key(vsi);
9091
9092exit:
9093	/* on error, reset all_numtc and all_enatc */
9094	if (ret) {
9095		vsi->all_numtc = 0;
9096		vsi->all_enatc = 0;
9097	}
9098	/* resume VSI */
9099	ice_ena_vsi(vsi, true);
9100
9101	return ret;
9102}
9103
9104static LIST_HEAD(ice_block_cb_list);
9105
9106static int
9107ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9108	     void *type_data)
9109{
9110	struct ice_netdev_priv *np = netdev_priv(netdev);
9111	struct ice_pf *pf = np->vsi->back;
9112	bool locked = false;
9113	int err;
9114
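	/* Two offload paths land here. Illustrative commands (not from this
	 * file): a classifier block bound via
	 *   tc qdisc add dev <ifname> clsact
	 * arrives as TC_SETUP_BLOCK, while a channel-mode qdisc such as
	 *   tc qdisc add dev <ifname> root mqprio num_tc 2 ... hw 1 mode channel
	 * arrives as TC_SETUP_QDISC_MQPRIO.
	 */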
9115	switch (type) {
9116	case TC_SETUP_BLOCK:
9117		return flow_block_cb_setup_simple(type_data,
9118						  &ice_block_cb_list,
9119						  ice_setup_tc_block_cb,
9120						  np, np, true);
9121	case TC_SETUP_QDISC_MQPRIO:
9122		if (ice_is_eswitch_mode_switchdev(pf)) {
9123			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9124			return -EOPNOTSUPP;
9125		}
9126
9127		if (pf->adev) {
9128			mutex_lock(&pf->adev_mutex);
9129			device_lock(&pf->adev->dev);
9130			locked = true;
9131			if (pf->adev->dev.driver) {
9132				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9133				err = -EBUSY;
9134				goto adev_unlock;
9135			}
9136		}
9137
9138		/* setup traffic classifier for receive side */
9139		mutex_lock(&pf->tc_mutex);
9140		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9141		mutex_unlock(&pf->tc_mutex);
9142
9143adev_unlock:
9144		if (locked) {
9145			device_unlock(&pf->adev->dev);
9146			mutex_unlock(&pf->adev_mutex);
9147		}
9148		return err;
9149	default:
9150		return -EOPNOTSUPP;
9151	}
9152	return -EOPNOTSUPP;
9153}
9154
9155static struct ice_indr_block_priv *
9156ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9157			   struct net_device *netdev)
9158{
9159	struct ice_indr_block_priv *cb_priv;
9160
9161	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9162		if (!cb_priv->netdev)
9163			return NULL;
9164		if (cb_priv->netdev == netdev)
9165			return cb_priv;
9166	}
9167	return NULL;
9168}
9169
9170static int
9171ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9172			void *indr_priv)
9173{
9174	struct ice_indr_block_priv *priv = indr_priv;
9175	struct ice_netdev_priv *np = priv->np;
9176
9177	switch (type) {
9178	case TC_SETUP_CLSFLOWER:
9179		return ice_setup_tc_cls_flower(np, priv->netdev,
9180					       (struct flow_cls_offload *)
9181					       type_data);
9182	default:
9183		return -EOPNOTSUPP;
9184	}
9185}
9186
9187static int
9188ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9189			struct ice_netdev_priv *np,
9190			struct flow_block_offload *f, void *data,
9191			void (*cleanup)(struct flow_block_cb *block_cb))
9192{
9193	struct ice_indr_block_priv *indr_priv;
9194	struct flow_block_cb *block_cb;
9195
9196	if (!ice_is_tunnel_supported(netdev) &&
9197	    !(is_vlan_dev(netdev) &&
9198	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
9199		return -EOPNOTSUPP;
9200
9201	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9202		return -EOPNOTSUPP;
9203
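	/* Indirect blocks let this PF offload filters installed on foreign
	 * netdevs it can act for - e.g. (illustrative) a VXLAN tunnel device
	 * or a VLAN stacked on this uplink - which is why the tunnel/VLAN
	 * gate above must pass before a callback is bound for that device.
	 */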
9204	switch (f->command) {
9205	case FLOW_BLOCK_BIND:
9206		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9207		if (indr_priv)
9208			return -EEXIST;
9209
9210		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9211		if (!indr_priv)
9212			return -ENOMEM;
9213
9214		indr_priv->netdev = netdev;
9215		indr_priv->np = np;
9216		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9217
9218		block_cb =
9219			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9220						 indr_priv, indr_priv,
9221						 ice_rep_indr_tc_block_unbind,
9222						 f, netdev, sch, data, np,
9223						 cleanup);
9224
9225		if (IS_ERR(block_cb)) {
9226			list_del(&indr_priv->list);
9227			kfree(indr_priv);
9228			return PTR_ERR(block_cb);
9229		}
9230		flow_block_cb_add(block_cb, f);
9231		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9232		break;
9233	case FLOW_BLOCK_UNBIND:
9234		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9235		if (!indr_priv)
9236			return -ENOENT;
9237
9238		block_cb = flow_block_cb_lookup(f->block,
9239						ice_indr_setup_block_cb,
9240						indr_priv);
9241		if (!block_cb)
9242			return -ENOENT;
9243
9244		flow_indr_block_cb_remove(block_cb, f);
9245
9246		list_del(&block_cb->driver_list);
9247		break;
9248	default:
9249		return -EOPNOTSUPP;
9250	}
9251	return 0;
9252}
9253
9254static int
9255ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9256		     void *cb_priv, enum tc_setup_type type, void *type_data,
9257		     void *data,
9258		     void (*cleanup)(struct flow_block_cb *block_cb))
9259{
9260	switch (type) {
9261	case TC_SETUP_BLOCK:
9262		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9263					       data, cleanup);
9264
9265	default:
9266		return -EOPNOTSUPP;
9267	}
9268}
9269
9270/**
9271 * ice_open - Called when a network interface becomes active
9272 * @netdev: network interface device structure
9273 *
9274 * The open entry point is called when a network interface is made
9275 * active by the system (IFF_UP). At this point all resources needed
9276 * for transmit and receive operations are allocated, the interrupt
9277 * handler is registered with the OS, the netdev watchdog is enabled,
9278 * and the stack is notified that the interface is ready.
9279 *
9280 * Returns 0 on success, negative value on failure
9281 */
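/* Usage note (illustrative): this is the ndo_open callback, invoked by the
 * networking core when userspace brings the interface up, e.g. via
 * "ip link set <ifname> up".
 */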
9282int ice_open(struct net_device *netdev)
9283{
9284	struct ice_netdev_priv *np = netdev_priv(netdev);
9285	struct ice_pf *pf = np->vsi->back;
9286
9287	if (ice_is_reset_in_progress(pf->state)) {
9288		netdev_err(netdev, "can't open net device while reset is in progress\n");
9289		return -EBUSY;
9290	}
9291
9292	return ice_open_internal(netdev);
9293}
9294
9295/**
9296 * ice_open_internal - Called when a network interface becomes active
9297 * @netdev: network interface device structure
9298 *
9299 * Internal ice_open implementation. Should not be called directly except
9300 * by ice_open and the reset handling routine.
9301 *
9302 * Returns 0 on success, negative value on failure
9303 */
9304int ice_open_internal(struct net_device *netdev)
9305{
9306	struct ice_netdev_priv *np = netdev_priv(netdev);
9307	struct ice_vsi *vsi = np->vsi;
9308	struct ice_pf *pf = vsi->back;
9309	struct ice_port_info *pi;
9310	int err;
9311
9312	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9313		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9314		return -EIO;
9315	}
9316
9317	netif_carrier_off(netdev);
9318
9319	pi = vsi->port_info;
9320	err = ice_update_link_info(pi);
9321	if (err) {
9322		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9323		return err;
9324	}
9325
9326	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9327
9328	/* Set PHY if there is media, otherwise, turn off PHY */
9329	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9330		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9331		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9332			err = ice_init_phy_user_cfg(pi);
9333			if (err) {
9334				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9335					   err);
9336				return err;
9337			}
9338		}
9339
9340		err = ice_configure_phy(vsi);
9341		if (err) {
9342			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9343				   err);
9344			return err;
9345		}
9346	} else {
9347		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9348		ice_set_link(vsi, false);
9349	}
9350
9351	err = ice_vsi_open(vsi);
9352	if (err)
9353		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9354			   vsi->vsi_num, vsi->vsw->sw_id);
9355
9356	/* Update existing tunnels information */
9357	udp_tunnel_get_rx_info(netdev);
9358
9359	return err;
9360}
9361
9362/**
9363 * ice_stop - Disables a network interface
9364 * @netdev: network interface device structure
9365 *
9366 * The stop entry point is called when an interface is de-activated by the OS,
9367 * and the netdevice enters the DOWN state. The hardware is still under the
9368 * driver's control, but the netdev interface is disabled.
9369 *
9370 * Returns success only - not allowed to fail
9371 */
9372int ice_stop(struct net_device *netdev)
9373{
9374	struct ice_netdev_priv *np = netdev_priv(netdev);
9375	struct ice_vsi *vsi = np->vsi;
9376	struct ice_pf *pf = vsi->back;
9377
9378	if (ice_is_reset_in_progress(pf->state)) {
9379		netdev_err(netdev, "can't stop net device while reset is in progress\n");
9380		return -EBUSY;
9381	}
9382
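	/* The block below honors the driver's link-down-on-close private
	 * flag, which an admin would typically set with (illustrative)
	 * "ethtool --set-priv-flags <ifname> link-down-on-close on" so the
	 * PHY link is forced down whenever the netdev is closed.
	 */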
9383	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9384		int link_err = ice_force_phys_link_state(vsi, false);
9385
9386		if (link_err) {
9387			if (link_err == -ENOMEDIUM)
9388				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9389					    vsi->vsi_num);
9390			else
9391				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9392					   vsi->vsi_num, link_err);
9393
9394			ice_vsi_close(vsi);
9395			return -EIO;
9396		}
9397	}
9398
9399	ice_vsi_close(vsi);
9400
9401	return 0;
9402}
9403
9404/**
9405 * ice_features_check - Validate encapsulated packet conforms to limits
9406 * @skb: skb buffer
9407 * @netdev: This port's netdev
9408 * @features: Offload features that the stack believes apply
9409 */
9410static netdev_features_t
9411ice_features_check(struct sk_buff *skb,
9412		   struct net_device __always_unused *netdev,
9413		   netdev_features_t features)
9414{
9415	bool gso = skb_is_gso(skb);
9416	size_t len;
9417
9418	/* No point in doing any of this if neither checksum nor GSO are
9419	 * being requested for this frame. We can rule out both by just
9420	 * checking for CHECKSUM_PARTIAL
9421	 */
9422	if (skb->ip_summed != CHECKSUM_PARTIAL)
9423		return features;
9424
9425	/* We cannot support GSO if the MSS is going to be less than
9426	 * 64 bytes. If it is then we need to drop support for GSO.
9427	 */
9428	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9429		features &= ~NETIF_F_GSO_MASK;
9430
9431	len = skb_network_offset(skb);
9432	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9433		goto out_rm_features;
9434
9435	len = skb_network_header_len(skb);
9436	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9437		goto out_rm_features;
9438
9439	if (skb->encapsulation) {
9440		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9441		 * the case of IPIP frames, the transport header pointer is
9442		 * after the inner header! So check to make sure that this
9443		 * is a GRE or UDP_TUNNEL frame before doing that math.
9444		 */
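		/* Rough layout assumed here (illustrative sketch):
		 *   outer L2 | outer L3 | outer L4/GRE | inner L2? | inner L3
		 * so skb_inner_network_header() minus skb_transport_header()
		 * covers the outer L4 plus tunnel bytes that hardware must
		 * account for as L4 length.
		 */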
9445		if (gso && (skb_shinfo(skb)->gso_type &
9446			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9447			len = skb_inner_network_header(skb) -
9448			      skb_transport_header(skb);
9449			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9450				goto out_rm_features;
9451		}
9452
9453		len = skb_inner_network_header_len(skb);
9454		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9455			goto out_rm_features;
9456	}
9457
9458	return features;
9459out_rm_features:
9460	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9461}
9462
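/* Safe-mode note: this reduced set of ops is used when the driver comes up
 * without a DDP package (basic Tx/Rx only), so advanced callbacks such as
 * TC setup and VF management are deliberately absent.
 */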
9463static const struct net_device_ops ice_netdev_safe_mode_ops = {
9464	.ndo_open = ice_open,
9465	.ndo_stop = ice_stop,
9466	.ndo_start_xmit = ice_start_xmit,
9467	.ndo_set_mac_address = ice_set_mac_address,
9468	.ndo_validate_addr = eth_validate_addr,
9469	.ndo_change_mtu = ice_change_mtu,
9470	.ndo_get_stats64 = ice_get_stats64,
9471	.ndo_tx_timeout = ice_tx_timeout,
9472	.ndo_bpf = ice_xdp_safe_mode,
9473};
9474
9475static const struct net_device_ops ice_netdev_ops = {
9476	.ndo_open = ice_open,
9477	.ndo_stop = ice_stop,
9478	.ndo_start_xmit = ice_start_xmit,
9479	.ndo_select_queue = ice_select_queue,
9480	.ndo_features_check = ice_features_check,
9481	.ndo_fix_features = ice_fix_features,
9482	.ndo_set_rx_mode = ice_set_rx_mode,
9483	.ndo_set_mac_address = ice_set_mac_address,
9484	.ndo_validate_addr = eth_validate_addr,
9485	.ndo_change_mtu = ice_change_mtu,
9486	.ndo_get_stats64 = ice_get_stats64,
9487	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9488	.ndo_eth_ioctl = ice_eth_ioctl,
9489	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9490	.ndo_set_vf_mac = ice_set_vf_mac,
9491	.ndo_get_vf_config = ice_get_vf_cfg,
9492	.ndo_set_vf_trust = ice_set_vf_trust,
9493	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9494	.ndo_set_vf_link_state = ice_set_vf_link_state,
9495	.ndo_get_vf_stats = ice_get_vf_stats,
9496	.ndo_set_vf_rate = ice_set_vf_bw,
9497	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9498	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9499	.ndo_setup_tc = ice_setup_tc,
9500	.ndo_set_features = ice_set_features,
9501	.ndo_bridge_getlink = ice_bridge_getlink,
9502	.ndo_bridge_setlink = ice_bridge_setlink,
9503	.ndo_fdb_add = ice_fdb_add,
9504	.ndo_fdb_del = ice_fdb_del,
9505#ifdef CONFIG_RFS_ACCEL
9506	.ndo_rx_flow_steer = ice_rx_flow_steer,
9507#endif
9508	.ndo_tx_timeout = ice_tx_timeout,
9509	.ndo_bpf = ice_xdp,
9510	.ndo_xdp_xmit = ice_xdp_xmit,
9511	.ndo_xsk_wakeup = ice_xsk_wakeup,
9512};
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4/* Intel(R) Ethernet Connection E800 Series Linux Driver */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <generated/utsrelease.h>
 
   9#include "ice.h"
  10#include "ice_base.h"
  11#include "ice_lib.h"
  12#include "ice_fltr.h"
  13#include "ice_dcb_lib.h"
  14#include "ice_dcb_nl.h"
  15#include "ice_devlink.h"
 
  16/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
  17 * ice tracepoint functions. This must be done exactly once across the
  18 * ice driver.
  19 */
  20#define CREATE_TRACE_POINTS
  21#include "ice_trace.h"
 
 
 
 
  22
  23#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
  24static const char ice_driver_string[] = DRV_SUMMARY;
  25static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
  26
  27/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
  28#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
  29#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
  30
  31MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  32MODULE_DESCRIPTION(DRV_SUMMARY);
  33MODULE_LICENSE("GPL v2");
  34MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
  35
  36static int debug = -1;
  37module_param(debug, int, 0644);
  38#ifndef CONFIG_DYNAMIC_DEBUG
  39MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
  40#else
  41MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
  42#endif /* !CONFIG_DYNAMIC_DEBUG */
  43
  44static DEFINE_IDA(ice_aux_ida);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  45
  46static struct workqueue_struct *ice_wq;
 
  47static const struct net_device_ops ice_netdev_safe_mode_ops;
  48static const struct net_device_ops ice_netdev_ops;
  49static int ice_vsi_open(struct ice_vsi *vsi);
  50
  51static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
  52
  53static void ice_vsi_release_all(struct ice_pf *pf);
  54
  55bool netif_is_ice(struct net_device *dev)
 
 
 
 
 
 
 
 
 
  56{
  57	return dev && (dev->netdev_ops == &ice_netdev_ops);
  58}
  59
  60/**
  61 * ice_get_tx_pending - returns number of Tx descriptors not processed
  62 * @ring: the ring of descriptors
  63 */
  64static u16 ice_get_tx_pending(struct ice_ring *ring)
  65{
  66	u16 head, tail;
  67
  68	head = ring->next_to_clean;
  69	tail = ring->next_to_use;
  70
  71	if (head != tail)
  72		return (head < tail) ?
  73			tail - head : (tail + ring->count - head);
  74	return 0;
  75}
  76
  77/**
  78 * ice_check_for_hang_subtask - check for and recover hung queues
  79 * @pf: pointer to PF struct
  80 */
  81static void ice_check_for_hang_subtask(struct ice_pf *pf)
  82{
  83	struct ice_vsi *vsi = NULL;
  84	struct ice_hw *hw;
  85	unsigned int i;
  86	int packets;
  87	u32 v;
  88
  89	ice_for_each_vsi(pf, v)
  90		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
  91			vsi = pf->vsi[v];
  92			break;
  93		}
  94
  95	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
  96		return;
  97
  98	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
  99		return;
 100
 101	hw = &vsi->back->hw;
 102
 103	for (i = 0; i < vsi->num_txq; i++) {
 104		struct ice_ring *tx_ring = vsi->tx_rings[i];
 
 
 
 
 
 
 
 
 
 
 105
 106		if (tx_ring && tx_ring->desc) {
 107			/* If packet counter has not changed the queue is
 108			 * likely stalled, so force an interrupt for this
 109			 * queue.
 110			 *
 111			 * prev_pkt would be negative if there was no
 112			 * pending work.
 113			 */
 114			packets = tx_ring->stats.pkts & INT_MAX;
 115			if (tx_ring->tx_stats.prev_pkt == packets) {
 116				/* Trigger sw interrupt to revive the queue */
 117				ice_trigger_sw_intr(hw, tx_ring->q_vector);
 118				continue;
 119			}
 120
 121			/* Memory barrier between read of packet count and call
 122			 * to ice_get_tx_pending()
 123			 */
 124			smp_rmb();
 125			tx_ring->tx_stats.prev_pkt =
 126			    ice_get_tx_pending(tx_ring) ? packets : -1;
 127		}
 128	}
 129}
 130
 131/**
 132 * ice_init_mac_fltr - Set initial MAC filters
 133 * @pf: board private structure
 134 *
 135 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 136 * address and broadcast address. If an error is encountered, netdevice will be
 137 * unregistered.
 138 */
 139static int ice_init_mac_fltr(struct ice_pf *pf)
 140{
 141	enum ice_status status;
 142	struct ice_vsi *vsi;
 143	u8 *perm_addr;
 144
 145	vsi = ice_get_main_vsi(pf);
 146	if (!vsi)
 147		return -EINVAL;
 148
 149	perm_addr = vsi->port_info->mac.perm_addr;
 150	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
 151	if (status)
 152		return -EIO;
 153
 154	return 0;
 155}
 156
 157/**
 158 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 159 * @netdev: the net device on which the sync is happening
 160 * @addr: MAC address to sync
 161 *
 162 * This is a callback function which is called by the in kernel device sync
 163 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 164 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 165 * MAC filters from the hardware.
 166 */
 167static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
 168{
 169	struct ice_netdev_priv *np = netdev_priv(netdev);
 170	struct ice_vsi *vsi = np->vsi;
 171
 172	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
 173				     ICE_FWD_TO_VSI))
 174		return -EINVAL;
 175
 176	return 0;
 177}
 178
 179/**
 180 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 181 * @netdev: the net device on which the unsync is happening
 182 * @addr: MAC address to unsync
 183 *
 184 * This is a callback function which is called by the in kernel device unsync
 185 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 186 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 187 * delete the MAC filters from the hardware.
 188 */
 189static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 190{
 191	struct ice_netdev_priv *np = netdev_priv(netdev);
 192	struct ice_vsi *vsi = np->vsi;
 193
 194	/* Under some circumstances, we might receive a request to delete our
 195	 * own device address from our uc list. Because we store the device
 196	 * address in the VSI's MAC filter list, we need to ignore such
 197	 * requests and not delete our device address from this list.
 198	 */
 199	if (ether_addr_equal(addr, netdev->dev_addr))
 200		return 0;
 201
 202	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
 203				     ICE_FWD_TO_VSI))
 204		return -EINVAL;
 205
 206	return 0;
 207}
 208
 209/**
 210 * ice_vsi_fltr_changed - check if filter state changed
 211 * @vsi: VSI to be checked
 212 *
 213 * returns true if filter state has changed, false otherwise.
 214 */
 215static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 216{
 217	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
 218	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
 219	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 220}
 221
 222/**
 223 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 224 * @vsi: the VSI being configured
 225 * @promisc_m: mask of promiscuous config bits
 226 * @set_promisc: enable or disable promisc flag request
 227 *
 228 */
 229static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
 230{
 231	struct ice_hw *hw = &vsi->back->hw;
 232	enum ice_status status = 0;
 233
 234	if (vsi->type != ICE_VSI_PF)
 235		return 0;
 236
 237	if (vsi->num_vlan > 1) {
 238		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
 239						  set_promisc);
 
 240	} else {
 241		if (set_promisc)
 242			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
 243						     0);
 244		else
 245			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 246						       0);
 247	}
 
 
 248
 249	if (status)
 250		return -EIO;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 251
 252	return 0;
 
 
 253}
 254
 255/**
 256 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 257 * @vsi: ptr to the VSI
 258 *
 259 * Push any outstanding VSI filter changes through the AdminQ.
 260 */
 261static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 262{
 
 263	struct device *dev = ice_pf_to_dev(vsi->back);
 264	struct net_device *netdev = vsi->netdev;
 265	bool promisc_forced_on = false;
 266	struct ice_pf *pf = vsi->back;
 267	struct ice_hw *hw = &pf->hw;
 268	enum ice_status status = 0;
 269	u32 changed_flags = 0;
 270	u8 promisc_m;
 271	int err = 0;
 272
 273	if (!vsi->netdev)
 274		return -EINVAL;
 275
 276	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
 277		usleep_range(1000, 2000);
 278
 279	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
 280	vsi->current_netdev_flags = vsi->netdev->flags;
 281
 282	INIT_LIST_HEAD(&vsi->tmp_sync_list);
 283	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
 284
 285	if (ice_vsi_fltr_changed(vsi)) {
 286		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 287		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 288		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 289
 290		/* grab the netdev's addr_list_lock */
 291		netif_addr_lock_bh(netdev);
 292		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 293			      ice_add_mac_to_unsync_list);
 294		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 295			      ice_add_mac_to_unsync_list);
 296		/* our temp lists are populated. release lock */
 297		netif_addr_unlock_bh(netdev);
 298	}
 299
 300	/* Remove MAC addresses in the unsync list */
 301	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
 302	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
 303	if (status) {
 304		netdev_err(netdev, "Failed to delete MAC filters\n");
 305		/* if we failed because of alloc failures, just bail */
 306		if (status == ICE_ERR_NO_MEMORY) {
 307			err = -ENOMEM;
 308			goto out;
 309		}
 310	}
 311
 312	/* Add MAC addresses in the sync list */
 313	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
 314	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
 315	/* If filter is added successfully or already exists, do not go into
 316	 * 'if' condition and report it as error. Instead continue processing
 317	 * rest of the function.
 318	 */
 319	if (status && status != ICE_ERR_ALREADY_EXISTS) {
 320		netdev_err(netdev, "Failed to add MAC filters\n");
 321		/* If there is no more space for new umac filters, VSI
 322		 * should go into promiscuous mode. There should be some
 323		 * space reserved for promiscuous filters.
 324		 */
 325		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
 326		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
 327				      vsi->state)) {
 328			promisc_forced_on = true;
 329			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
 330				    vsi->vsi_num);
 331		} else {
 332			err = -EIO;
 333			goto out;
 334		}
 335	}
 
 336	/* check for changes in promiscuous modes */
 337	if (changed_flags & IFF_ALLMULTI) {
 338		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
 339			if (vsi->num_vlan > 1)
 340				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
 341			else
 342				promisc_m = ICE_MCAST_PROMISC_BITS;
 343
 344			err = ice_cfg_promisc(vsi, promisc_m, true);
 345			if (err) {
 346				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
 347					   vsi->vsi_num);
 348				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
 349				goto out_promisc;
 350			}
 351		} else {
 352			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
 353			if (vsi->num_vlan > 1)
 354				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
 355			else
 356				promisc_m = ICE_MCAST_PROMISC_BITS;
 357
 358			err = ice_cfg_promisc(vsi, promisc_m, false);
 359			if (err) {
 360				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
 361					   vsi->vsi_num);
 362				vsi->current_netdev_flags |= IFF_ALLMULTI;
 363				goto out_promisc;
 364			}
 365		}
 366	}
 367
 368	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
 369	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
 370		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 371		if (vsi->current_netdev_flags & IFF_PROMISC) {
 372			/* Apply Rx filter rule to get traffic from wire */
 373			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
 374				err = ice_set_dflt_vsi(pf->first_sw, vsi);
 375				if (err && err != -EEXIST) {
 376					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
 377						   err, vsi->vsi_num);
 378					vsi->current_netdev_flags &=
 379						~IFF_PROMISC;
 380					goto out_promisc;
 381				}
 382				ice_cfg_vlan_pruning(vsi, false, false);
 
 
 
 
 
 
 
 
 
 
 
 383			}
 384		} else {
 385			/* Clear Rx filter to remove traffic from wire */
 386			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
 387				err = ice_clear_dflt_vsi(pf->first_sw);
 388				if (err) {
 389					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
 390						   err, vsi->vsi_num);
 391					vsi->current_netdev_flags |=
 392						IFF_PROMISC;
 393					goto out_promisc;
 394				}
 395				if (vsi->num_vlan > 1)
 396					ice_cfg_vlan_pruning(vsi, true, false);
 
 
 
 
 
 
 
 
 
 
 
 
 
 397			}
 398		}
 399	}
 400	goto exit;
 401
 402out_promisc:
 403	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 404	goto exit;
 405out:
 406	/* if something went wrong then set the changed flag so we try again */
 407	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 408	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 409exit:
 410	clear_bit(ICE_CFG_BUSY, vsi->state);
 411	return err;
 412}
 413
 414/**
 415 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 416 * @pf: board private structure
 417 */
 418static void ice_sync_fltr_subtask(struct ice_pf *pf)
 419{
 420	int v;
 421
 422	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
 423		return;
 424
 425	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 426
 427	ice_for_each_vsi(pf, v)
 428		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
 429		    ice_vsi_sync_fltr(pf->vsi[v])) {
 430			/* come back and try again later */
 431			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 432			break;
 433		}
 434}
 435
 436/**
 437 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 438 * @pf: the PF
 439 * @locked: is the rtnl_lock already held
 440 */
 441static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 442{
 443	int node;
 444	int v;
 445
 446	ice_for_each_vsi(pf, v)
 447		if (pf->vsi[v])
 448			ice_dis_vsi(pf->vsi[v], locked);
 449
 450	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
 451		pf->pf_agg_node[node].num_vsis = 0;
 452
 453	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
 454		pf->vf_agg_node[node].num_vsis = 0;
 455}
 456
 457/**
 458 * ice_prepare_for_reset - prep for the core to reset
 459 * @pf: board private structure
 460 *
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 461 * Inform or close all dependent features in prep for reset.
 462 */
 463static void
 464ice_prepare_for_reset(struct ice_pf *pf)
 465{
 466	struct ice_hw *hw = &pf->hw;
 467	unsigned int i;
 
 
 
 
 468
 469	/* already prepared for reset */
 470	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
 471		return;
 472
 473	ice_unplug_aux_dev(pf);
 474
 475	/* Notify VFs of impending reset */
 476	if (ice_check_sq_alive(hw, &hw->mailboxq))
 477		ice_vc_notify_reset(pf);
 478
 479	/* Disable VFs until reset is completed */
 480	ice_for_each_vf(pf, i)
 481		ice_set_vf_state_qs_dis(&pf->vf[i]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 482
 483	/* clear SW filtering DB */
 484	ice_clear_hw_tbls(hw);
 485	/* disable the VSIs and their queues that are not already DOWN */
 486	ice_pf_dis_all_vsi(pf, false);
 487
 488	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
 489		ice_ptp_release(pf);
 
 
 
 490
 491	if (hw->port_info)
 492		ice_sched_clear_port(hw->port_info);
 493
 494	ice_shutdown_all_ctrlq(hw);
 495
 496	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
 497}
 498
 499/**
 500 * ice_do_reset - Initiate one of many types of resets
 501 * @pf: board private structure
 502 * @reset_type: reset type requested
 503 * before this function was called.
 504 */
 505static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 506{
 507	struct device *dev = ice_pf_to_dev(pf);
 508	struct ice_hw *hw = &pf->hw;
 509
 510	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
 511
 512	ice_prepare_for_reset(pf);
 
 
 
 
 
 513
 514	/* trigger the reset */
 515	if (ice_reset(hw, reset_type)) {
 516		dev_err(dev, "reset %d failed\n", reset_type);
 517		set_bit(ICE_RESET_FAILED, pf->state);
 518		clear_bit(ICE_RESET_OICR_RECV, pf->state);
 519		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 520		clear_bit(ICE_PFR_REQ, pf->state);
 521		clear_bit(ICE_CORER_REQ, pf->state);
 522		clear_bit(ICE_GLOBR_REQ, pf->state);
 523		wake_up(&pf->reset_wait_queue);
 524		return;
 525	}
 526
 527	/* PFR is a bit of a special case because it doesn't result in an OICR
 528	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
 529	 * associated state bits.
 530	 */
 531	if (reset_type == ICE_RESET_PFR) {
 532		pf->pfr_count++;
 533		ice_rebuild(pf, reset_type);
 534		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 535		clear_bit(ICE_PFR_REQ, pf->state);
 536		wake_up(&pf->reset_wait_queue);
 537		ice_reset_all_vfs(pf, true);
 538	}
 539}
 540
 541/**
 542 * ice_reset_subtask - Set up for resetting the device and driver
 543 * @pf: board private structure
 544 */
 545static void ice_reset_subtask(struct ice_pf *pf)
 546{
 547	enum ice_reset_req reset_type = ICE_RESET_INVAL;
 548
 549	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
 550	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
 551	 * of reset is pending and sets bits in pf->state indicating the reset
 552	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
 553	 * prepare for pending reset if not already (for PF software-initiated
 554	 * global resets the software should already be prepared for it as
 555	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
 556	 * by firmware or software on other PFs, that bit is not set so prepare
 557	 * for the reset now), poll for reset done, rebuild and return.
 558	 */
 559	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
 560		/* Perform the largest reset requested */
 561		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
 562			reset_type = ICE_RESET_CORER;
 563		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
 564			reset_type = ICE_RESET_GLOBR;
 565		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
 566			reset_type = ICE_RESET_EMPR;
 567		/* return if no valid reset type requested */
 568		if (reset_type == ICE_RESET_INVAL)
 569			return;
 570		ice_prepare_for_reset(pf);
 571
 572		/* make sure we are ready to rebuild */
 573		if (ice_check_reset(&pf->hw)) {
 574			set_bit(ICE_RESET_FAILED, pf->state);
 575		} else {
 576			/* done with reset. start rebuild */
 577			pf->hw.reset_ongoing = false;
 578			ice_rebuild(pf, reset_type);
 579			/* clear bit to resume normal operations, but
 580			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
 581			 */
 582			clear_bit(ICE_RESET_OICR_RECV, pf->state);
 583			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 584			clear_bit(ICE_PFR_REQ, pf->state);
 585			clear_bit(ICE_CORER_REQ, pf->state);
 586			clear_bit(ICE_GLOBR_REQ, pf->state);
 587			wake_up(&pf->reset_wait_queue);
 588			ice_reset_all_vfs(pf, true);
 589		}
 590
 591		return;
 592	}
 593
 594	/* No pending resets to finish processing. Check for new resets */
 595	if (test_bit(ICE_PFR_REQ, pf->state))
 596		reset_type = ICE_RESET_PFR;
 
 
 
 
 
 597	if (test_bit(ICE_CORER_REQ, pf->state))
 598		reset_type = ICE_RESET_CORER;
 599	if (test_bit(ICE_GLOBR_REQ, pf->state))
 600		reset_type = ICE_RESET_GLOBR;
 601	/* If no valid reset type requested just return */
 602	if (reset_type == ICE_RESET_INVAL)
 603		return;
 604
 605	/* reset if not already down or busy */
 606	if (!test_bit(ICE_DOWN, pf->state) &&
 607	    !test_bit(ICE_CFG_BUSY, pf->state)) {
 608		ice_do_reset(pf, reset_type);
 609	}
 610}
 611
 612/**
 613 * ice_print_topo_conflict - print topology conflict message
 614 * @vsi: the VSI whose topology status is being checked
 615 */
 616static void ice_print_topo_conflict(struct ice_vsi *vsi)
 617{
 618	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
 619	case ICE_AQ_LINK_TOPO_CONFLICT:
 620	case ICE_AQ_LINK_MEDIA_CONFLICT:
 621	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
 622	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
 623	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
 624		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
 625		break;
 626	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
 627		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
 
 
 
 628		break;
 629	default:
 630		break;
 631	}
 632}
 633
 634/**
 635 * ice_print_link_msg - print link up or down message
 636 * @vsi: the VSI whose link status is being queried
 637 * @isup: boolean for if the link is now up or down
 638 */
 639void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 640{
 641	struct ice_aqc_get_phy_caps_data *caps;
 642	const char *an_advertised;
 643	enum ice_status status;
 644	const char *fec_req;
 645	const char *speed;
 646	const char *fec;
 647	const char *fc;
 648	const char *an;
 
 649
 650	if (!vsi)
 651		return;
 652
 653	if (vsi->current_isup == isup)
 654		return;
 655
 656	vsi->current_isup = isup;
 657
 658	if (!isup) {
 659		netdev_info(vsi->netdev, "NIC Link is Down\n");
 660		return;
 661	}
 662
 663	switch (vsi->port_info->phy.link_info.link_speed) {
 664	case ICE_AQ_LINK_SPEED_100GB:
 665		speed = "100 G";
 666		break;
 667	case ICE_AQ_LINK_SPEED_50GB:
 668		speed = "50 G";
 669		break;
 670	case ICE_AQ_LINK_SPEED_40GB:
 671		speed = "40 G";
 672		break;
 673	case ICE_AQ_LINK_SPEED_25GB:
 674		speed = "25 G";
 675		break;
 676	case ICE_AQ_LINK_SPEED_20GB:
 677		speed = "20 G";
 678		break;
 679	case ICE_AQ_LINK_SPEED_10GB:
 680		speed = "10 G";
 681		break;
 682	case ICE_AQ_LINK_SPEED_5GB:
 683		speed = "5 G";
 684		break;
 685	case ICE_AQ_LINK_SPEED_2500MB:
 686		speed = "2.5 G";
 687		break;
 688	case ICE_AQ_LINK_SPEED_1000MB:
 689		speed = "1 G";
 690		break;
 691	case ICE_AQ_LINK_SPEED_100MB:
 692		speed = "100 M";
 693		break;
 694	default:
 695		speed = "Unknown ";
 696		break;
 697	}
 698
 699	switch (vsi->port_info->fc.current_mode) {
 700	case ICE_FC_FULL:
 701		fc = "Rx/Tx";
 702		break;
 703	case ICE_FC_TX_PAUSE:
 704		fc = "Tx";
 705		break;
 706	case ICE_FC_RX_PAUSE:
 707		fc = "Rx";
 708		break;
 709	case ICE_FC_NONE:
 710		fc = "None";
 711		break;
 712	default:
 713		fc = "Unknown";
 714		break;
 715	}
 716
 717	/* Get FEC mode based on negotiated link info */
 718	switch (vsi->port_info->phy.link_info.fec_info) {
 719	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
 720	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
 721		fec = "RS-FEC";
 722		break;
 723	case ICE_AQ_LINK_25G_KR_FEC_EN:
 724		fec = "FC-FEC/BASE-R";
 725		break;
 726	default:
 727		fec = "NONE";
 728		break;
 729	}
 730
 731	/* check if autoneg completed, might be false due to not supported */
 732	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
 733		an = "True";
 734	else
 735		an = "False";
 736
 737	/* Get FEC mode requested based on PHY caps last SW configuration */
 738	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 739	if (!caps) {
 740		fec_req = "Unknown";
 741		an_advertised = "Unknown";
 742		goto done;
 743	}
 744
 745	status = ice_aq_get_phy_caps(vsi->port_info, false,
 746				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
 747	if (status)
 748		netdev_info(vsi->netdev, "Get phy capability failed.\n");
 749
 750	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
 751
 752	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
 753	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
 754		fec_req = "RS-FEC";
 755	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
 756		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
 757		fec_req = "FC-FEC/BASE-R";
 758	else
 759		fec_req = "NONE";
 760
 761	kfree(caps);
 762
 763done:
 764	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
 765		    speed, fec_req, fec, an_advertised, an, fc);
 766	ice_print_topo_conflict(vsi);
 767}
 768
 769/**
 770 * ice_vsi_link_event - update the VSI's netdev
 771 * @vsi: the VSI on which the link event occurred
 772 * @link_up: whether or not the VSI needs to be set up or down
 773 */
 774static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
 775{
 776	if (!vsi)
 777		return;
 778
 779	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
 780		return;
 781
 782	if (vsi->type == ICE_VSI_PF) {
 783		if (link_up == netif_carrier_ok(vsi->netdev))
 784			return;
 785
 786		if (link_up) {
 787			netif_carrier_on(vsi->netdev);
 788			netif_tx_wake_all_queues(vsi->netdev);
 789		} else {
 790			netif_carrier_off(vsi->netdev);
 791			netif_tx_stop_all_queues(vsi->netdev);
 792		}
 793	}
 794}
 795
 796/**
 797 * ice_set_dflt_mib - send a default config MIB to the FW
 798 * @pf: private PF struct
 799 *
 800 * This function sends a default configuration MIB to the FW.
 801 *
 802 * If this function errors out at any point, the driver is still able to
 803 * function.  The main impact is that LFC may not operate as expected.
 804 * Therefore an error state in this function should be treated with a DBG
 805 * message and continue on with driver rebuild/reenable.
 806 */
 807static void ice_set_dflt_mib(struct ice_pf *pf)
 808{
 809	struct device *dev = ice_pf_to_dev(pf);
 810	u8 mib_type, *buf, *lldpmib = NULL;
 811	u16 len, typelen, offset = 0;
 812	struct ice_lldp_org_tlv *tlv;
 813	struct ice_hw *hw = &pf->hw;
 814	u32 ouisubtype;
 815
 816	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
 817	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
 818	if (!lldpmib) {
 819		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
 820			__func__);
 821		return;
 822	}
 823
 824	/* Add ETS CFG TLV */
 825	tlv = (struct ice_lldp_org_tlv *)lldpmib;
 826	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
 827		   ICE_IEEE_ETS_TLV_LEN);
 828	tlv->typelen = htons(typelen);
 829	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 830		      ICE_IEEE_SUBTYPE_ETS_CFG);
 831	tlv->ouisubtype = htonl(ouisubtype);
 832
 833	buf = tlv->tlvinfo;
 834	buf[0] = 0;
 835
 836	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
 837	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
 838	 * Octets 13 - 20 are TSA values - leave as zeros
 839	 */
 840	buf[5] = 0x64;
 841	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
 842	offset += len + 2;
 843	tlv = (struct ice_lldp_org_tlv *)
 844		((char *)tlv + sizeof(tlv->typelen) + len);
 845
 846	/* Add ETS REC TLV */
 847	buf = tlv->tlvinfo;
 848	tlv->typelen = htons(typelen);
 849
 850	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 851		      ICE_IEEE_SUBTYPE_ETS_REC);
 852	tlv->ouisubtype = htonl(ouisubtype);
 853
 854	/* First octet of buf is reserved
 855	 * Octets 1 - 4 map UP to TC - all UPs map to zero
 856	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
 857	 * Octets 13 - 20 are TSA value - leave as zeros
 858	 */
 859	buf[5] = 0x64;
 860	offset += len + 2;
 861	tlv = (struct ice_lldp_org_tlv *)
 862		((char *)tlv + sizeof(tlv->typelen) + len);
 863
 864	/* Add PFC CFG TLV */
 865	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
 866		   ICE_IEEE_PFC_TLV_LEN);
 867	tlv->typelen = htons(typelen);
 868
 869	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 870		      ICE_IEEE_SUBTYPE_PFC_CFG);
 871	tlv->ouisubtype = htonl(ouisubtype);
 872
 873	/* Octet 1 left as all zeros - PFC disabled */
 874	buf[0] = 0x08;
 875	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
 876	offset += len + 2;
 877
 878	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
 879		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
 880
 881	kfree(lldpmib);
 882}
 883
 884/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 885 * ice_check_module_power
 886 * @pf: pointer to PF struct
 887 * @link_cfg_err: bitmap from the link info structure
 888 *
 889 * check module power level returned by a previous call to aq_get_link_info
 890 * and print error messages if module power level is not supported
 891 */
 892static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
 893{
 894	/* if module power level is supported, clear the flag */
 895	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
 896			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
 897		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
 898		return;
 899	}
 900
 901	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
 902	 * above block didn't clear this bit, there's nothing to do
 903	 */
 904	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
 905		return;
 906
 907	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
 908		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
 909		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
 910	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
 911		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
 912		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
 913	}
 914}
 915
 916/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 917 * ice_link_event - process the link event
 918 * @pf: PF that the link event is associated with
 919 * @pi: port_info for the port that the link event is associated with
 920 * @link_up: true if the physical link is up and false if it is down
 921 * @link_speed: current link speed received from the link event
 922 *
 923 * Returns 0 on success and negative on failure
 924 */
 925static int
 926ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
 927	       u16 link_speed)
 928{
 929	struct device *dev = ice_pf_to_dev(pf);
 930	struct ice_phy_info *phy_info;
 931	enum ice_status status;
 932	struct ice_vsi *vsi;
 933	u16 old_link_speed;
 934	bool old_link;
 
 935
 936	phy_info = &pi->phy;
 937	phy_info->link_info_old = phy_info->link_info;
 938
 939	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
 940	old_link_speed = phy_info->link_info_old.link_speed;
 941
 942	/* update the link info structures and re-enable link events,
 943	 * don't bail on failure due to other book keeping needed
 944	 */
 945	status = ice_update_link_info(pi);
 946	if (status)
 947		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
 948			pi->lport, ice_stat_str(status),
 949			ice_aq_str(pi->hw->adminq.sq_last_status));
 950
 951	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
 952
 953	/* Check if the link state is up after updating link info, and treat
 954	 * this event as an UP event since the link is actually UP now.
 955	 */
 956	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
 957		link_up = true;
 958
 959	vsi = ice_get_main_vsi(pf);
 960	if (!vsi || !vsi->port_info)
 961		return -EINVAL;
 962
 963	/* turn off PHY if media was removed */
 964	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
 965	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
 966		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
 967		ice_set_link(vsi, false);
 968	}
 969
 970	/* if the old link up/down and speed is the same as the new */
 971	if (link_up == old_link && link_speed == old_link_speed)
 972		return 0;
 973
 
 
 974	if (ice_is_dcb_active(pf)) {
 975		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
 976			ice_dcb_rebuild(pf);
 977	} else {
 978		if (link_up)
 979			ice_set_dflt_mib(pf);
 980	}
 981	ice_vsi_link_event(vsi, link_up);
 982	ice_print_link_msg(vsi, link_up);
 983
 984	ice_vc_notify_link_state(pf);
 985
 986	return 0;
 987}
 988
 989/**
 990 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 991 * @pf: board private structure
 992 */
 993static void ice_watchdog_subtask(struct ice_pf *pf)
 994{
 995	int i;
 996
 997	/* if interface is down do nothing */
 998	if (test_bit(ICE_DOWN, pf->state) ||
 999	    test_bit(ICE_CFG_BUSY, pf->state))
1000		return;
1001
1002	/* make sure we don't do these things too often */
1003	if (time_before(jiffies,
1004			pf->serv_tmr_prev + pf->serv_tmr_period))
1005		return;
1006
1007	pf->serv_tmr_prev = jiffies;
1008
1009	/* Update the stats for active netdevs so the network stack
1010	 * can look at updated numbers whenever it cares to
1011	 */
1012	ice_update_pf_stats(pf);
1013	ice_for_each_vsi(pf, i)
1014		if (pf->vsi[i] && pf->vsi[i]->netdev)
1015			ice_update_vsi_stats(pf->vsi[i]);
1016}
1017
1018/**
1019 * ice_init_link_events - enable/initialize link events
1020 * @pi: pointer to the port_info instance
1021 *
1022 * Returns -EIO on failure, 0 on success
1023 */
1024static int ice_init_link_events(struct ice_port_info *pi)
1025{
1026	u16 mask;
1027
1028	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1029		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
 
1030
1031	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1032		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1033			pi->lport);
1034		return -EIO;
1035	}
1036
1037	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1038		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1039			pi->lport);
1040		return -EIO;
1041	}
1042
1043	return 0;
1044}
1045
1046/**
1047 * ice_handle_link_event - handle link event via ARQ
1048 * @pf: PF that the link event is associated with
1049 * @event: event structure containing link status info
1050 */
1051static int
1052ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1053{
1054	struct ice_aqc_get_link_status_data *link_data;
1055	struct ice_port_info *port_info;
1056	int status;
1057
1058	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1059	port_info = pf->hw.port_info;
1060	if (!port_info)
1061		return -EINVAL;
1062
1063	status = ice_link_event(pf, port_info,
1064				!!(link_data->link_info & ICE_AQ_LINK_UP),
1065				le16_to_cpu(link_data->link_speed));
1066	if (status)
1067		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1068			status);
1069
1070	return status;
1071}
1072
1073enum ice_aq_task_state {
1074	ICE_AQ_TASK_WAITING = 0,
1075	ICE_AQ_TASK_COMPLETE,
1076	ICE_AQ_TASK_CANCELED,
1077};
 
 
 
 
 
 
 
 
 
 
1078
1079struct ice_aq_task {
1080	struct hlist_node entry;
1081
1082	u16 opcode;
1083	struct ice_rq_event_info *event;
1084	enum ice_aq_task_state state;
1085};
 
 
1086
1087/**
1088 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1089 * @pf: pointer to the PF private structure
 
1090 * @opcode: the opcode to wait for
1091 * @timeout: how long to wait, in jiffies
1092 * @event: storage for the event info
1093 *
1094 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1095 * current thread will be put to sleep until the specified event occurs or
1096 * until the given timeout is reached.
 
 
1097 *
1098 * To obtain only the descriptor contents, pass an event without an allocated
1099 * msg_buf. If the complete data buffer is desired, allocate the
1100 * event->msg_buf with enough space ahead of time.
1101 *
1102 * Returns: zero on success, or a negative error code on failure.
1103 */
1104int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1105			  struct ice_rq_event_info *event)
1106{
1107	struct device *dev = ice_pf_to_dev(pf);
1108	struct ice_aq_task *task;
1109	unsigned long start;
1110	long ret;
1111	int err;
1112
1113	task = kzalloc(sizeof(*task), GFP_KERNEL);
1114	if (!task)
1115		return -ENOMEM;
1116
1117	INIT_HLIST_NODE(&task->entry);
1118	task->opcode = opcode;
1119	task->event = event;
1120	task->state = ICE_AQ_TASK_WAITING;
1121
1122	spin_lock_bh(&pf->aq_wait_lock);
1123	hlist_add_head(&task->entry, &pf->aq_wait_list);
1124	spin_unlock_bh(&pf->aq_wait_lock);
 
1125
1126	start = jiffies;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1127
1128	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
 
1129					       timeout);
1130	switch (task->state) {
 
 
 
 
1131	case ICE_AQ_TASK_WAITING:
1132		err = ret < 0 ? ret : -ETIMEDOUT;
1133		break;
1134	case ICE_AQ_TASK_CANCELED:
1135		err = ret < 0 ? ret : -ECANCELED;
1136		break;
1137	case ICE_AQ_TASK_COMPLETE:
1138		err = ret < 0 ? ret : 0;
1139		break;
1140	default:
1141		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1142		err = -EINVAL;
1143		break;
1144	}
1145
1146	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1147		jiffies_to_msecs(jiffies - start),
1148		jiffies_to_msecs(timeout),
1149		opcode);
1150
1151	spin_lock_bh(&pf->aq_wait_lock);
1152	hlist_del(&task->entry);
1153	spin_unlock_bh(&pf->aq_wait_lock);
1154	kfree(task);
1155
1156	return err;
1157}
1158
1159/**
1160 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1161 * @pf: pointer to the PF private structure
1162 * @opcode: the opcode of the event
1163 * @event: the event to check
1164 *
1165 * Loops over the current list of pending threads waiting for an AdminQ event.
1166 * For each matching task, copy the contents of the event into the task
1167 * structure and wake up the thread.
1168 *
1169 * If multiple threads wait for the same opcode, they will all be woken up.
1170 *
1171 * Note that event->msg_buf will only be duplicated if the event has a buffer
1172 * with enough space already allocated. Otherwise, only the descriptor and
1173 * message length will be copied.
1174 *
1175 * Returns: true if an event was found, false otherwise
1176 */
1177static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1178				struct ice_rq_event_info *event)
1179{
 
1180	struct ice_aq_task *task;
1181	bool found = false;
1182
1183	spin_lock_bh(&pf->aq_wait_lock);
1184	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1185		if (task->state || task->opcode != opcode)
 
 
1186			continue;
1187
1188		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1189		task->event->msg_len = event->msg_len;
 
1190
1191		/* Only copy the data buffer if a destination was set */
1192		if (task->event->msg_buf &&
1193		    task->event->buf_len > event->buf_len) {
1194			memcpy(task->event->msg_buf, event->msg_buf,
1195			       event->buf_len);
1196			task->event->buf_len = event->buf_len;
1197		}
1198
1199		task->state = ICE_AQ_TASK_COMPLETE;
1200		found = true;
1201	}
1202	spin_unlock_bh(&pf->aq_wait_lock);
1203
1204	if (found)
1205		wake_up(&pf->aq_wait_queue);
1206}
1207
1208/**
1209 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1210 * @pf: the PF private structure
1211 *
1212 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1213 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1214 */
1215static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1216{
1217	struct ice_aq_task *task;
1218
1219	spin_lock_bh(&pf->aq_wait_lock);
1220	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1221		task->state = ICE_AQ_TASK_CANCELED;
1222	spin_unlock_bh(&pf->aq_wait_lock);
1223
1224	wake_up(&pf->aq_wait_queue);
1225}
1226
 
 
1227/**
1228 * __ice_clean_ctrlq - helper function to clean controlq rings
1229 * @pf: ptr to struct ice_pf
1230 * @q_type: specific Control queue type
1231 */
1232static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1233{
1234	struct device *dev = ice_pf_to_dev(pf);
1235	struct ice_rq_event_info event;
1236	struct ice_hw *hw = &pf->hw;
1237	struct ice_ctl_q_info *cq;
1238	u16 pending, i = 0;
1239	const char *qtype;
1240	u32 oldval, val;
1241
1242	/* Do not clean control queue if/when PF reset fails */
1243	if (test_bit(ICE_RESET_FAILED, pf->state))
1244		return 0;
1245
1246	switch (q_type) {
1247	case ICE_CTL_Q_ADMIN:
1248		cq = &hw->adminq;
1249		qtype = "Admin";
1250		break;
1251	case ICE_CTL_Q_SB:
1252		cq = &hw->sbq;
1253		qtype = "Sideband";
1254		break;
1255	case ICE_CTL_Q_MAILBOX:
1256		cq = &hw->mailboxq;
1257		qtype = "Mailbox";
1258		/* we are going to try to detect a malicious VF, so set the
1259		 * state to begin detection
1260		 */
1261		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1262		break;
1263	default:
1264		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1265		return 0;
1266	}
1267
1268	/* check for error indications - PF_xx_AxQLEN register layout for
1269	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1270	 */
1271	val = rd32(hw, cq->rq.len);
1272	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1273		   PF_FW_ARQLEN_ARQCRIT_M)) {
1274		oldval = val;
1275		if (val & PF_FW_ARQLEN_ARQVFE_M)
1276			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1277				qtype);
1278		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1279			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1280				qtype);
1281		}
1282		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1283			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1284				qtype);
1285		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1286			 PF_FW_ARQLEN_ARQCRIT_M);
1287		if (oldval != val)
1288			wr32(hw, cq->rq.len, val);
1289	}
1290
1291	val = rd32(hw, cq->sq.len);
1292	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1293		   PF_FW_ATQLEN_ATQCRIT_M)) {
1294		oldval = val;
1295		if (val & PF_FW_ATQLEN_ATQVFE_M)
1296			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1297				qtype);
1298		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1299			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1300				qtype);
1301		}
1302		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1303			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1304				qtype);
1305		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1306			 PF_FW_ATQLEN_ATQCRIT_M);
1307		if (oldval != val)
1308			wr32(hw, cq->sq.len, val);
1309	}
1310
1311	event.buf_len = cq->rq_buf_size;
1312	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1313	if (!event.msg_buf)
1314		return 0;
1315
1316	do {
1317		enum ice_status ret;
1318		u16 opcode;
1319
1320		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1321		if (ret == ICE_ERR_AQ_NO_WORK)
1322			break;
1323		if (ret) {
1324			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1325				ice_stat_str(ret));
1326			break;
1327		}
1328
1329		opcode = le16_to_cpu(event.desc.opcode);
1330
1331		/* Notify any thread that might be waiting for this event */
1332		ice_aq_check_events(pf, opcode, &event);
1333
1334		switch (opcode) {
1335		case ice_aqc_opc_get_link_status:
1336			if (ice_handle_link_event(pf, &event))
1337				dev_err(dev, "Could not handle link event\n");
1338			break;
1339		case ice_aqc_opc_event_lan_overflow:
1340			ice_vf_lan_overflow_event(pf, &event);
1341			break;
1342		case ice_mbx_opc_send_msg_to_pf:
1343			if (!ice_is_malicious_vf(pf, &event, i, pending))
1344				ice_vc_process_vf_msg(pf, &event);
1345			break;
1346		case ice_aqc_opc_fw_logging:
1347			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1348			break;
1349		case ice_aqc_opc_lldp_set_mib_change:
1350			ice_dcb_process_lldp_set_mib_change(pf, &event);
1351			break;
1352		default:
1353			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1354				qtype, opcode);
1355			break;
1356		}
1357	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1358
1359	kfree(event.msg_buf);
1360
1361	return pending && (i == ICE_DFLT_IRQ_WORK);
1362}
1363
1364/**
1365 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1366 * @hw: pointer to hardware info
1367 * @cq: control queue information
1368 *
1369 * returns true if there are pending messages in a queue, false if there aren't
1370 */
1371static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1372{
1373	u16 ntu;
1374
1375	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1376	return cq->rq.next_to_clean != ntu;
1377}
1378
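/* For illustration only: if the masked head register reads 7 while
 * next_to_clean is 5, descriptors 5 and 6 are still unprocessed and the
 * helper above returns true; once the clean routine catches up, the two
 * indices match and it returns false.
 */
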
1379/**
1380 * ice_clean_adminq_subtask - clean the AdminQ rings
1381 * @pf: board private structure
1382 */
1383static void ice_clean_adminq_subtask(struct ice_pf *pf)
1384{
1385	struct ice_hw *hw = &pf->hw;
1386
1387	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1388		return;
1389
1390	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1391		return;
1392
1393	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1394
1395	/* There might be a situation where new messages arrive to a control
1396	 * queue between processing the last message and clearing the
1397	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1398	 * ice_ctrlq_pending) and process new messages if any.
1399	 */
1400	if (ice_ctrlq_pending(hw, &hw->adminq))
1401		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1402
1403	ice_flush(hw);
1404}
1405
1406/**
1407 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1408 * @pf: board private structure
1409 */
1410static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1411{
1412	struct ice_hw *hw = &pf->hw;
1413
1414	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1415		return;
1416
1417	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1418		return;
1419
1420	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1421
1422	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1423		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1424
1425	ice_flush(hw);
1426}
1427
1428/**
1429 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1430 * @pf: board private structure
1431 */
1432static void ice_clean_sbq_subtask(struct ice_pf *pf)
1433{
1434	struct ice_hw *hw = &pf->hw;
1435
1436	/* Nothing to do here if sideband queue is not supported */
1437	if (!ice_is_sbq_supported(hw)) {
1438		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1439		return;
1440	}
1441
1442	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1443		return;
1444
1445	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1446		return;
1447
1448	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1449
1450	if (ice_ctrlq_pending(hw, &hw->sbq))
1451		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1452
1453	ice_flush(hw);
1454}
1455
1456/**
1457 * ice_service_task_schedule - schedule the service task to wake up
1458 * @pf: board private structure
1459 *
1460 * If not already scheduled, this puts the task into the work queue.
1461 */
1462void ice_service_task_schedule(struct ice_pf *pf)
1463{
1464	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1465	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1466	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1467		queue_work(ice_wq, &pf->serv_task);
1468}
1469
1470/**
1471 * ice_service_task_complete - finish up the service task
1472 * @pf: board private structure
1473 */
1474static void ice_service_task_complete(struct ice_pf *pf)
1475{
1476	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1477
1478	/* force memory (pf->state) to sync before next service task */
1479	smp_mb__before_atomic();
1480	clear_bit(ICE_SERVICE_SCHED, pf->state);
1481}
1482
1483/**
1484 * ice_service_task_stop - stop service task and cancel works
1485 * @pf: board private structure
1486 *
1487 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1488 * 1 otherwise.
1489 */
1490static int ice_service_task_stop(struct ice_pf *pf)
1491{
1492	int ret;
1493
1494	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1495
1496	if (pf->serv_tmr.function)
1497		del_timer_sync(&pf->serv_tmr);
1498	if (pf->serv_task.func)
1499		cancel_work_sync(&pf->serv_task);
1500
1501	clear_bit(ICE_SERVICE_SCHED, pf->state);
1502	return ret;
1503}
1504
1505/**
1506 * ice_service_task_restart - restart service task and schedule works
1507 * @pf: board private structure
1508 *
1509 * This function is needed for the suspend and resume paths (e.g. a WoL scenario)
1510 */
1511static void ice_service_task_restart(struct ice_pf *pf)
1512{
1513	clear_bit(ICE_SERVICE_DIS, pf->state);
1514	ice_service_task_schedule(pf);
1515}
1516
1517/**
1518 * ice_service_timer - timer callback to schedule service task
1519 * @t: pointer to timer_list
1520 */
1521static void ice_service_timer(struct timer_list *t)
1522{
1523	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1524
1525	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1526	ice_service_task_schedule(pf);
1527}
1528
1529/**
1530 * ice_handle_mdd_event - handle malicious driver detect event
1531 * @pf: pointer to the PF structure
1532 *
1533 * Called from service task. OICR interrupt handler indicates MDD event.
1534 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1535 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1536 * disable the queue, the PF can be configured to reset the VF using ethtool
1537 * private flag mdd-auto-reset-vf.
1538 */
1539static void ice_handle_mdd_event(struct ice_pf *pf)
1540{
1541	struct device *dev = ice_pf_to_dev(pf);
1542	struct ice_hw *hw = &pf->hw;
1543	unsigned int i;
1544	u32 reg;
1545
1546	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1547		/* Since the VF MDD event logging is rate limited, check if
1548		 * there are pending MDD events.
1549		 */
1550		ice_print_vfs_mdd_events(pf);
1551		return;
1552	}
1553
1554	/* find what triggered an MDD event */
1555	reg = rd32(hw, GL_MDET_TX_PQM);
1556	if (reg & GL_MDET_TX_PQM_VALID_M) {
1557		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1558				GL_MDET_TX_PQM_PF_NUM_S;
1559		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1560				GL_MDET_TX_PQM_VF_NUM_S;
1561		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1562				GL_MDET_TX_PQM_MAL_TYPE_S;
1563		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1564				GL_MDET_TX_PQM_QNUM_S);
1565
1566		if (netif_msg_tx_err(pf))
1567			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1568				 event, queue, pf_num, vf_num);
1569		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1570	}
1571
1572	reg = rd32(hw, GL_MDET_TX_TCLAN);
1573	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1574		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1575				GL_MDET_TX_TCLAN_PF_NUM_S;
1576		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1577				GL_MDET_TX_TCLAN_VF_NUM_S;
1578		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1579				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1580		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1581				GL_MDET_TX_TCLAN_QNUM_S);
1582
1583		if (netif_msg_tx_err(pf))
1584			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1585				 event, queue, pf_num, vf_num);
1586		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1587	}
1588
1589	reg = rd32(hw, GL_MDET_RX);
1590	if (reg & GL_MDET_RX_VALID_M) {
1591		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1592				GL_MDET_RX_PF_NUM_S;
1593		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1594				GL_MDET_RX_VF_NUM_S;
1595		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1596				GL_MDET_RX_MAL_TYPE_S;
1597		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1598				GL_MDET_RX_QNUM_S);
1599
1600		if (netif_msg_rx_err(pf))
1601			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1602				 event, queue, pf_num, vf_num);
1603		wr32(hw, GL_MDET_RX, 0xffffffff);
1604	}
1605
1606	/* check to see if this PF caused an MDD event */
1607	reg = rd32(hw, PF_MDET_TX_PQM);
1608	if (reg & PF_MDET_TX_PQM_VALID_M) {
1609		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1610		if (netif_msg_tx_err(pf))
1611			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1612	}
1613
1614	reg = rd32(hw, PF_MDET_TX_TCLAN);
1615	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1616		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1617		if (netif_msg_tx_err(pf))
1618			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1619	}
1620
1621	reg = rd32(hw, PF_MDET_RX);
1622	if (reg & PF_MDET_RX_VALID_M) {
1623		wr32(hw, PF_MDET_RX, 0xFFFF);
1624		if (netif_msg_rx_err(pf))
1625			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1626	}
1627
1628	/* Check to see if one of the VFs caused an MDD event, and then
1629	 * increment counters and set print pending
1630	 */
1631	ice_for_each_vf(pf, i) {
1632		struct ice_vf *vf = &pf->vf[i];
1633
1634		reg = rd32(hw, VP_MDET_TX_PQM(i));
1635		if (reg & VP_MDET_TX_PQM_VALID_M) {
1636			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1637			vf->mdd_tx_events.count++;
1638			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1639			if (netif_msg_tx_err(pf))
1640				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1641					 i);
1642		}
1643
1644		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1645		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1646			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1647			vf->mdd_tx_events.count++;
1648			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1649			if (netif_msg_tx_err(pf))
1650				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1651					 i);
1652		}
1653
1654		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1655		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1656			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1657			vf->mdd_tx_events.count++;
1658			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1659			if (netif_msg_tx_err(pf))
1660				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1661					 i);
1662		}
1663
1664		reg = rd32(hw, VP_MDET_RX(i));
1665		if (reg & VP_MDET_RX_VALID_M) {
1666			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1667			vf->mdd_rx_events.count++;
1668			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1669			if (netif_msg_rx_err(pf))
1670				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1671					 i);
1672
1673			/* Since the queue is disabled on VF Rx MDD events, the
1674			 * PF can be configured to reset the VF through ethtool
1675			 * private flag mdd-auto-reset-vf.
1676			 */
1677			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1678				/* VF MDD event counters will be cleared by
1679				 * reset, so print the event prior to reset.
1680				 */
1681				ice_print_vf_rx_mdd_event(vf);
1682				ice_reset_vf(vf, false);
1683			}
1684		}
1685	}
1686
1687	ice_print_vfs_mdd_events(pf);
1688}
1689
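/* For illustration only: each open-coded mask-and-shift decode above is
 * equivalent to FIELD_GET() from <linux/bitfield.h>. A hedged sketch of
 * one such extraction (hypothetical helper, not part of the driver):
 */
static u8 __maybe_unused ice_mdd_tx_pqm_pf_num(u32 reg)
{
	return FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
}
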
1690/**
1691 * ice_force_phys_link_state - Force the physical link state
1692 * @vsi: VSI to force the physical link state to up/down
1693 * @link_up: true/false indicates to set the physical link to up/down
1694 *
1695 * Force the physical link state by getting the current PHY capabilities from
1696 * hardware and setting the PHY config based on the determined capabilities. If
1697 * the link changes, a link event will be triggered because both the Enable Automatic
1698 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1699 *
1700 * Returns 0 on success, negative on failure
1701 */
1702static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1703{
1704	struct ice_aqc_get_phy_caps_data *pcaps;
1705	struct ice_aqc_set_phy_cfg_data *cfg;
1706	struct ice_port_info *pi;
1707	struct device *dev;
1708	int retcode;
1709
1710	if (!vsi || !vsi->port_info || !vsi->back)
1711		return -EINVAL;
1712	if (vsi->type != ICE_VSI_PF)
1713		return 0;
1714
1715	dev = ice_pf_to_dev(vsi->back);
1716
1717	pi = vsi->port_info;
1718
1719	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1720	if (!pcaps)
1721		return -ENOMEM;
1722
1723	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1724				      NULL);
1725	if (retcode) {
1726		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1727			vsi->vsi_num, retcode);
1728		retcode = -EIO;
1729		goto out;
1730	}
1731
1732	/* No change in link */
1733	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1734	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1735		goto out;
1736
1737	/* Use the current user PHY configuration. The current user PHY
1738	 * configuration is initialized during probe from PHY capabilities
1739	 * software mode, and updated on set PHY configuration.
1740	 */
1741	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1742	if (!cfg) {
1743		retcode = -ENOMEM;
1744		goto out;
1745	}
1746
1747	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1748	if (link_up)
1749		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1750	else
1751		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1752
1753	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1754	if (retcode) {
1755		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1756			vsi->vsi_num, retcode);
1757		retcode = -EIO;
1758	}
1759
1760	kfree(cfg);
1761out:
1762	kfree(pcaps);
1763	return retcode;
1764}
1765
1766/**
1767 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1768 * @pi: port info structure
1769 *
1770 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1771 */
1772static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1773{
1774	struct ice_aqc_get_phy_caps_data *pcaps;
1775	struct ice_pf *pf = pi->hw->back;
1776	enum ice_status status;
1777	int err = 0;
1778
1779	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1780	if (!pcaps)
1781		return -ENOMEM;
1782
1783	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1784				     NULL);
1785
1786	if (status) {
1787		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1788		err = -EIO;
1789		goto out;
1790	}
1791
1792	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1793	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1794
1795out:
1796	kfree(pcaps);
1797	return err;
1798}
1799
1800/**
1801 * ice_init_link_dflt_override - Initialize link default override
1802 * @pi: port info structure
1803 *
1804 * Initialize link default override and PHY total port shutdown during probe
1805 */
1806static void ice_init_link_dflt_override(struct ice_port_info *pi)
1807{
1808	struct ice_link_default_override_tlv *ldo;
1809	struct ice_pf *pf = pi->hw->back;
1810
1811	ldo = &pf->link_dflt_override;
1812	if (ice_get_link_default_override(ldo, pi))
1813		return;
1814
1815	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1816		return;
1817
1818	/* Enable Total Port Shutdown (override/replace link-down-on-close
1819	 * ethtool private flag) for ports with Port Disable bit set.
1820	 */
1821	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1822	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1823}
1824
1825/**
1826 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1827 * @pi: port info structure
1828 *
1829 * If default override is enabled, initialize the user PHY cfg speed and FEC
1830 * settings using the default override mask from the NVM.
1831 *
1832 * The PHY should only be configured with the default override settings the
1833 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1834 * is used to indicate that the user PHY cfg default override is initialized
1835 * and the PHY has not been configured with the default override settings. The
1836 * state is set here, and cleared in ice_configure_phy the first time the PHY is
1837 * configured.
1838 *
1839 * This function should be called only if the FW doesn't support default
1840 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1841 */
1842static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1843{
1844	struct ice_link_default_override_tlv *ldo;
1845	struct ice_aqc_set_phy_cfg_data *cfg;
1846	struct ice_phy_info *phy = &pi->phy;
1847	struct ice_pf *pf = pi->hw->back;
1848
1849	ldo = &pf->link_dflt_override;
1850
1851	/* If link default override is enabled, use it to mask the NVM PHY capabilities
1852	 * for speed and FEC default configuration.
1853	 */
1854	cfg = &phy->curr_user_phy_cfg;
1855
1856	if (ldo->phy_type_low || ldo->phy_type_high) {
1857		cfg->phy_type_low = pf->nvm_phy_type_lo &
1858				    cpu_to_le64(ldo->phy_type_low);
1859		cfg->phy_type_high = pf->nvm_phy_type_hi &
1860				     cpu_to_le64(ldo->phy_type_high);
1861	}
1862	cfg->link_fec_opt = ldo->fec_options;
1863	phy->curr_user_fec_req = ICE_FEC_AUTO;
1864
1865	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1866}
1867
1868/**
1869 * ice_init_phy_user_cfg - Initialize the PHY user configuration
1870 * @pi: port info structure
1871 *
1872 * Initialize the current user PHY configuration, speed, FEC, and FC requested
1873 * mode to default. The PHY defaults are from get PHY capabilities topology
1874 * with media, so this should be called when media is first available. An error is returned if
1875 * called when media is not available. The PHY initialization completed state is
1876 * set here.
1877 *
1878 * These configurations are used when setting the PHY configuration. The
1879 * user PHY configuration is updated on set PHY configuration.
1880 * Returns 0 on success, negative on failure.
1881 */
1882static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1883{
1884	struct ice_aqc_get_phy_caps_data *pcaps;
1885	struct ice_phy_info *phy = &pi->phy;
1886	struct ice_pf *pf = pi->hw->back;
1887	enum ice_status status;
1888	int err = 0;
1889
1890	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1891		return -EIO;
1892
1893	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1894	if (!pcaps)
1895		return -ENOMEM;
1896
1897	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1898		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1899					     pcaps, NULL);
1900	else
1901		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1902					     pcaps, NULL);
1903	if (status) {
1904		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1905		err = -EIO;
1906		goto err_out;
1907	}
1908
1909	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1910
1911	/* check if lenient mode is supported and enabled */
1912	if (ice_fw_supports_link_override(pi->hw) &&
1913	    !(pcaps->module_compliance_enforcement &
1914	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1915		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1916
1917		/* if the FW supports default PHY configuration mode, then the driver
1918		 * does not have to apply link override settings. If not,
1919		 * initialize user PHY configuration with link override values
1920		 */
1921		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1922		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1923			ice_init_phy_cfg_dflt_override(pi);
1924			goto out;
1925		}
1926	}
1927
1928	/* if link default override is not enabled, set user flow control and
1929	 * FEC settings based on what get_phy_caps returned
1930	 */
1931	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1932						      pcaps->link_fec_options);
1933	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1934
1935out:
1936	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1937	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1938err_out:
1939	kfree(pcaps);
1940	return err;
1941}
1942
1943/**
1944 * ice_configure_phy - configure PHY
1945 * @vsi: VSI of PHY
1946 *
1947 * Set the PHY configuration. If the current PHY configuration is the same as
1948 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1949 * configure the PHY based on the get PHY capabilities for topology with media.
1950 */
1951static int ice_configure_phy(struct ice_vsi *vsi)
1952{
1953	struct device *dev = ice_pf_to_dev(vsi->back);
1954	struct ice_port_info *pi = vsi->port_info;
1955	struct ice_aqc_get_phy_caps_data *pcaps;
1956	struct ice_aqc_set_phy_cfg_data *cfg;
1957	struct ice_phy_info *phy = &pi->phy;
1958	struct ice_pf *pf = vsi->back;
1959	enum ice_status status;
1960	int err = 0;
1961
1962	/* Ensure we have media as we cannot configure a medialess port */
1963	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1964		return -EPERM;
1965
1966	ice_print_topo_conflict(vsi);
1967
1968	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1969		return -EPERM;
1970
1971	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1972		return ice_force_phys_link_state(vsi, true);
1973
1974	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1975	if (!pcaps)
1976		return -ENOMEM;
1977
1978	/* Get current PHY config */
1979	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1980				     NULL);
1981	if (status) {
1982		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1983			vsi->vsi_num, ice_stat_str(status));
1984		err = -EIO;
1985		goto done;
1986	}
1987
1988	/* If PHY enable link is configured and configuration has not changed,
1989	 * there's nothing to do
1990	 */
1991	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1992	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1993		goto done;
1994
1995	/* Use PHY topology as baseline for configuration */
1996	memset(pcaps, 0, sizeof(*pcaps));
1997	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1998		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1999					     pcaps, NULL);
2000	else
2001		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2002					     pcaps, NULL);
2003	if (status) {
2004		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
2005			vsi->vsi_num, ice_stat_str(status));
2006		err = -EIO;
2007		goto done;
2008	}
2009
2010	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2011	if (!cfg) {
2012		err = -ENOMEM;
2013		goto done;
2014	}
2015
2016	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2017
2018	/* Speed - If default override pending, use curr_user_phy_cfg set in
2019	 * ice_init_phy_cfg_dflt_override.
2020	 */
2021	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2022			       vsi->back->state)) {
2023		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2024		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2025	} else {
2026		u64 phy_low = 0, phy_high = 0;
2027
2028		ice_update_phy_type(&phy_low, &phy_high,
2029				    pi->phy.curr_user_speed_req);
2030		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2031		cfg->phy_type_high = pcaps->phy_type_high &
2032				     cpu_to_le64(phy_high);
2033	}
2034
2035	/* Can't provide what was requested; use PHY capabilities */
2036	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2037		cfg->phy_type_low = pcaps->phy_type_low;
2038		cfg->phy_type_high = pcaps->phy_type_high;
2039	}
2040
2041	/* FEC */
2042	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2043
2044	/* Can't provide what was requested; use PHY capabilities */
2045	if (cfg->link_fec_opt !=
2046	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2047		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2048		cfg->link_fec_opt = pcaps->link_fec_options;
2049	}
2050
2051	/* Flow Control - always supported; no need to check against
2052	 * capabilities
2053	 */
2054	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2055
2056	/* Enable link and link update */
2057	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2058
2059	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2060	if (status) {
2061		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2062			vsi->vsi_num, ice_stat_str(status));
2063		err = -EIO;
2064	}
2065
2066	kfree(cfg);
2067done:
2068	kfree(pcaps);
2069	return err;
2070}
2071
2072/**
2073 * ice_check_media_subtask - Check for media
2074 * @pf: pointer to PF struct
2075 *
2076 * If media is available, then initialize the PHY user configuration if it has
2077 * not been done yet, and configure the PHY if the interface is up.
2078 */
2079static void ice_check_media_subtask(struct ice_pf *pf)
2080{
2081	struct ice_port_info *pi;
2082	struct ice_vsi *vsi;
2083	int err;
2084
2085	/* No need to check for media if it's already present */
2086	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2087		return;
2088
2089	vsi = ice_get_main_vsi(pf);
2090	if (!vsi)
2091		return;
2092
2093	/* Refresh link info and check if media is present */
2094	pi = vsi->port_info;
2095	err = ice_update_link_info(pi);
2096	if (err)
2097		return;
2098
2099	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2100
2101	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2102		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2103			ice_init_phy_user_cfg(pi);
2104
2105		/* PHY settings are reset on media insertion, reconfigure
2106		 * PHY to preserve settings.
2107		 */
2108		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2109		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2110			return;
2111
2112		err = ice_configure_phy(vsi);
2113		if (!err)
2114			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2115
2116		/* A Link Status Event will be generated; the event handler
2117		 * will complete bringing the interface up
2118		 */
2119	}
2120}
2121
2122/**
2123 * ice_service_task - manage and run subtasks
2124 * @work: pointer to work_struct contained by the PF struct
2125 */
2126static void ice_service_task(struct work_struct *work)
2127{
2128	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2129	unsigned long start_time = jiffies;
2130
2131	/* subtasks */
2132
2133	/* process reset requests first */
2134	ice_reset_subtask(pf);
2135
2136	/* bail if a reset/recovery cycle is pending or rebuild failed */
2137	if (ice_is_reset_in_progress(pf->state) ||
2138	    test_bit(ICE_SUSPENDED, pf->state) ||
2139	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2140		ice_service_task_complete(pf);
2141		return;
2142	}
2143
2144	ice_clean_adminq_subtask(pf);
2145	ice_check_media_subtask(pf);
2146	ice_check_for_hang_subtask(pf);
2147	ice_sync_fltr_subtask(pf);
2148	ice_handle_mdd_event(pf);
2149	ice_watchdog_subtask(pf);
2150
2151	if (ice_is_safe_mode(pf)) {
2152		ice_service_task_complete(pf);
2153		return;
2154	}
2155
2156	ice_process_vflr_event(pf);
2157	ice_clean_mailboxq_subtask(pf);
2158	ice_clean_sbq_subtask(pf);
2159	ice_sync_arfs_fltrs(pf);
2160	ice_flush_fdir_ctx(pf);
2161
2162	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2163	ice_service_task_complete(pf);
2164
2165	/* If the tasks have taken longer than one service timer period
2166	 * or there is more work to be done, reset the service timer to
2167	 * schedule the service task now.
2168	 */
2169	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2170	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2171	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2172	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2173	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2174	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2175	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2176		mod_timer(&pf->serv_tmr, jiffies);
2177}
2178
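/* For illustration only: a hedged sketch of how the service machinery is
 * wired together at probe time. The real setup lives in ice_probe(); the
 * field names match the driver, but this body is illustrative rather than
 * the actual init sequence.
 */
static void __maybe_unused ice_service_wiring_sketch(struct ice_pf *pf)
{
	/* serv_task runs ice_service_task() each time it is queued */
	INIT_WORK(&pf->serv_task, ice_service_task);

	/* serv_tmr re-arms itself and re-queues the work via
	 * ice_service_timer()
	 */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
}
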
2179/**
2180 * ice_set_ctrlq_len - helper function to set controlq length
2181 * @hw: pointer to the HW instance
2182 */
2183static void ice_set_ctrlq_len(struct ice_hw *hw)
2184{
2185	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2186	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2187	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2188	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2189	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2190	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2191	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2192	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2193	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2194	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2195	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2196	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2197}
2198
2199/**
2200 * ice_schedule_reset - schedule a reset
2201 * @pf: board private structure
2202 * @reset: reset being requested
2203 */
2204int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2205{
2206	struct device *dev = ice_pf_to_dev(pf);
2207
2208	/* bail out if earlier reset has failed */
2209	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2210		dev_dbg(dev, "earlier reset has failed\n");
2211		return -EIO;
2212	}
2213	/* bail if reset/recovery already in progress */
2214	if (ice_is_reset_in_progress(pf->state)) {
2215		dev_dbg(dev, "Reset already in progress\n");
2216		return -EBUSY;
2217	}
2218
2219	ice_unplug_aux_dev(pf);
2220
2221	switch (reset) {
2222	case ICE_RESET_PFR:
2223		set_bit(ICE_PFR_REQ, pf->state);
2224		break;
2225	case ICE_RESET_CORER:
2226		set_bit(ICE_CORER_REQ, pf->state);
2227		break;
2228	case ICE_RESET_GLOBR:
2229		set_bit(ICE_GLOBR_REQ, pf->state);
2230		break;
2231	default:
2232		return -EINVAL;
2233	}
2234
2235	ice_service_task_schedule(pf);
2236	return 0;
2237}
2238
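/* For illustration only: a hypothetical caller requesting a PF reset.
 * ice_schedule_reset() only records the request in pf->state and kicks the
 * service task; the reset itself is performed later by ice_reset_subtask().
 */
static void __maybe_unused ice_request_pfr_sketch(struct ice_pf *pf)
{
	int err = ice_schedule_reset(pf, ICE_RESET_PFR);

	if (err)
		dev_dbg(ice_pf_to_dev(pf), "PFR not scheduled: %d\n", err);
}
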
2239/**
2240 * ice_irq_affinity_notify - Callback for affinity changes
2241 * @notify: context as to what irq was changed
2242 * @mask: the new affinity mask
2243 *
2244 * This is a callback function used by the irq_set_affinity_notifier function
2245 * so that we may register to receive changes to the irq affinity masks.
2246 */
2247static void
2248ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2249			const cpumask_t *mask)
2250{
2251	struct ice_q_vector *q_vector =
2252		container_of(notify, struct ice_q_vector, affinity_notify);
2253
2254	cpumask_copy(&q_vector->affinity_mask, mask);
2255}
2256
2257/**
2258 * ice_irq_affinity_release - Callback for affinity notifier release
2259 * @ref: internal core kernel usage
2260 *
2261 * This is a callback function used by the irq_set_affinity_notifier function
2262 * to inform the current notification subscriber that they will no longer
2263 * receive notifications.
2264 */
2265static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2266
2267/**
2268 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2269 * @vsi: the VSI being configured
2270 */
2271static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2272{
2273	struct ice_hw *hw = &vsi->back->hw;
2274	int i;
2275
2276	ice_for_each_q_vector(vsi, i)
2277		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2278
2279	ice_flush(hw);
2280	return 0;
2281}
2282
2283/**
2284 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2285 * @vsi: the VSI being configured
2286 * @basename: name for the vector
2287 */
2288static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2289{
2290	int q_vectors = vsi->num_q_vectors;
2291	struct ice_pf *pf = vsi->back;
2292	int base = vsi->base_vector;
2293	struct device *dev;
2294	int rx_int_idx = 0;
2295	int tx_int_idx = 0;
2296	int vector, err;
2297	int irq_num;
2298
2299	dev = ice_pf_to_dev(pf);
2300	for (vector = 0; vector < q_vectors; vector++) {
2301		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2302
2303		irq_num = pf->msix_entries[base + vector].vector;
2304
2305		if (q_vector->tx.ring && q_vector->rx.ring) {
2306			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2307				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2308			tx_int_idx++;
2309		} else if (q_vector->rx.ring) {
2310			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2311				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2312		} else if (q_vector->tx.ring) {
2313			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2314				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2315		} else {
2316			/* skip this unused q_vector */
2317			continue;
2318		}
2319		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2320			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2321					       IRQF_SHARED, q_vector->name,
2322					       q_vector);
2323		else
2324			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2325					       0, q_vector->name, q_vector);
2326		if (err) {
2327			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2328				   err);
2329			goto free_q_irqs;
2330		}
2331
2332		/* register for affinity change notifications */
2333		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2334			struct irq_affinity_notify *affinity_notify;
2335
2336			affinity_notify = &q_vector->affinity_notify;
2337			affinity_notify->notify = ice_irq_affinity_notify;
2338			affinity_notify->release = ice_irq_affinity_release;
2339			irq_set_affinity_notifier(irq_num, affinity_notify);
2340		}
2341
2342		/* assign the mask for this irq */
2343		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2344	}
2345
2346	vsi->irqs_ready = true;
2347	return 0;
2348
2349free_q_irqs:
2350	while (vector) {
2351		vector--;
2352		irq_num = pf->msix_entries[base + vector].vector;
2353		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2354			irq_set_affinity_notifier(irq_num, NULL);
2355		irq_set_affinity_hint(irq_num, NULL);
2356		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2357	}
2358	return err;
2359}
2360
2361/**
2362 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2363 * @vsi: VSI to setup Tx rings used by XDP
2364 *
2365 * Return 0 on success and negative value on error
2366 */
2367static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2368{
2369	struct device *dev = ice_pf_to_dev(vsi->back);
2370	int i;
2371
2372	for (i = 0; i < vsi->num_xdp_txq; i++) {
2373		u16 xdp_q_idx = vsi->alloc_txq + i;
2374		struct ice_ring *xdp_ring;
2375
2376		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2377
2378		if (!xdp_ring)
2379			goto free_xdp_rings;
2380
2381		xdp_ring->q_index = xdp_q_idx;
2382		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2383		xdp_ring->ring_active = false;
2384		xdp_ring->vsi = vsi;
2385		xdp_ring->netdev = NULL;
2386		xdp_ring->dev = dev;
2387		xdp_ring->count = vsi->num_tx_desc;
2388		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2389		if (ice_setup_tx_ring(xdp_ring))
2390			goto free_xdp_rings;
2391		ice_set_ring_xdp(xdp_ring);
2392		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2393	}
2394
2395	return 0;
2396
2397free_xdp_rings:
2398	for (; i >= 0; i--)
2399		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2400			ice_free_tx_ring(vsi->xdp_rings[i]);
2401	return -ENOMEM;
2402}
2403
2404/**
2405 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2406 * @vsi: VSI to set the bpf prog on
2407 * @prog: the bpf prog pointer
2408 */
2409static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2410{
2411	struct bpf_prog *old_prog;
2412	int i;
2413
2414	old_prog = xchg(&vsi->xdp_prog, prog);
2415	if (old_prog)
2416		bpf_prog_put(old_prog);
2417
2418	ice_for_each_rxq(vsi, i)
2419		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2420}
2421
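/* For illustration only: the WRITE_ONCE() publication above implies that
 * hot-path readers load the program pointer with READ_ONCE(). A hedged
 * sketch of such a reader (the real check lives in the Rx clean routine):
 */
static bool __maybe_unused ice_ring_has_xdp_prog(struct ice_ring *rx_ring)
{
	return !!READ_ONCE(rx_ring->xdp_prog);
}
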
2422/**
2423 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2424 * @vsi: VSI to bring up Tx rings used by XDP
2425 * @prog: bpf program that will be assigned to VSI
2426 *
2427 * Return 0 on success and negative value on error
2428 */
2429int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2430{
2431	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2432	int xdp_rings_rem = vsi->num_xdp_txq;
2433	struct ice_pf *pf = vsi->back;
2434	struct ice_qs_cfg xdp_qs_cfg = {
2435		.qs_mutex = &pf->avail_q_mutex,
2436		.pf_map = pf->avail_txqs,
2437		.pf_map_size = pf->max_pf_txqs,
2438		.q_count = vsi->num_xdp_txq,
2439		.scatter_count = ICE_MAX_SCATTER_TXQS,
2440		.vsi_map = vsi->txq_map,
2441		.vsi_map_offset = vsi->alloc_txq,
2442		.mapping_mode = ICE_VSI_MAP_CONTIG
2443	};
2444	enum ice_status status;
2445	struct device *dev;
2446	int i, v_idx;
2447
2448	dev = ice_pf_to_dev(pf);
2449	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2450				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2451	if (!vsi->xdp_rings)
2452		return -ENOMEM;
2453
2454	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2455	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2456		goto err_map_xdp;
2457
2458	if (ice_xdp_alloc_setup_rings(vsi))
2459		goto clear_xdp_rings;
2460
2461	/* follow the logic from ice_vsi_map_rings_to_vectors */
2462	ice_for_each_q_vector(vsi, v_idx) {
2463		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2464		int xdp_rings_per_v, q_id, q_base;
2465
2466		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2467					       vsi->num_q_vectors - v_idx);
2468		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2469
2470		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2471			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2472
2473			xdp_ring->q_vector = q_vector;
2474			xdp_ring->next = q_vector->tx.ring;
2475			q_vector->tx.ring = xdp_ring;
2476		}
2477		xdp_rings_rem -= xdp_rings_per_v;
2478	}
2479
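	/* For illustration only: the DIV_ROUND_UP() loop above spreads the
	 * rings as evenly as possible. With num_xdp_txq = 5 and
	 * num_q_vectors = 3, the vectors receive 2, 2 and 1 rings:
	 * DIV_ROUND_UP(5, 3) = 2 (3 remain), DIV_ROUND_UP(3, 2) = 2
	 * (1 remains), DIV_ROUND_UP(1, 1) = 1.
	 */
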
2480	/* omit the scheduler update if in reset path; XDP queues will be
2481	 * taken into account at the end of ice_vsi_rebuild, where
2482	 * ice_cfg_vsi_lan is being called
2483	 */
2484	if (ice_is_reset_in_progress(pf->state))
2485		return 0;
2486
2487	/* tell the Tx scheduler that right now we have
2488	 * additional queues
2489	 */
2490	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2491		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2492
2493	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2494				 max_txqs);
2495	if (status) {
2496		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2497			ice_stat_str(status));
2498		goto clear_xdp_rings;
2499	}
2500	ice_vsi_assign_bpf_prog(vsi, prog);
2501
2502	return 0;
2503clear_xdp_rings:
2504	for (i = 0; i < vsi->num_xdp_txq; i++)
2505		if (vsi->xdp_rings[i]) {
2506			kfree_rcu(vsi->xdp_rings[i], rcu);
2507			vsi->xdp_rings[i] = NULL;
2508		}
2509
2510err_map_xdp:
2511	mutex_lock(&pf->avail_q_mutex);
2512	for (i = 0; i < vsi->num_xdp_txq; i++) {
2513		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2514		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2515	}
2516	mutex_unlock(&pf->avail_q_mutex);
2517
2518	devm_kfree(dev, vsi->xdp_rings);
2519	return -ENOMEM;
2520}
2521
2522/**
2523 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2524 * @vsi: VSI to remove XDP rings
2525 *
2526 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2527 * resources
2528 */
2529int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2530{
2531	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2532	struct ice_pf *pf = vsi->back;
2533	int i, v_idx;
2534
2535	/* q_vectors are freed in reset path so there's no point in detaching
2536	 * rings; if the rebuild was triggered by something other than a reset,
2537	 * the bits in pf->state won't be set, so additionally check the first q_vector
2538	 * against NULL
2539	 */
2540	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2541		goto free_qmap;
2542
2543	ice_for_each_q_vector(vsi, v_idx) {
2544		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2545		struct ice_ring *ring;
2546
2547		ice_for_each_ring(ring, q_vector->tx)
2548			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2549				break;
2550
2551		/* restore the value of last node prior to XDP setup */
2552		q_vector->tx.ring = ring;
2553	}
2554
2555free_qmap:
2556	mutex_lock(&pf->avail_q_mutex);
2557	for (i = 0; i < vsi->num_xdp_txq; i++) {
2558		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2559		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2560	}
2561	mutex_unlock(&pf->avail_q_mutex);
2562
2563	for (i = 0; i < vsi->num_xdp_txq; i++)
2564		if (vsi->xdp_rings[i]) {
2565			if (vsi->xdp_rings[i]->desc)
2566				ice_free_tx_ring(vsi->xdp_rings[i]);
2567			kfree_rcu(vsi->xdp_rings[i], rcu);
2568			vsi->xdp_rings[i] = NULL;
2569		}
2570
2571	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2572	vsi->xdp_rings = NULL;
2573
2574	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2575		return 0;
2576
2577	ice_vsi_assign_bpf_prog(vsi, NULL);
2578
2579	/* notify Tx scheduler that we destroyed XDP queues and bring
2580	 * back the old number of child nodes
2581	 */
2582	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2583		max_txqs[i] = vsi->num_txq;
2584
2585	/* change number of XDP Tx queues to 0 */
2586	vsi->num_xdp_txq = 0;
2587
2588	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2589			       max_txqs);
2590}
2591
2592/**
2593 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2594 * @vsi: VSI to schedule napi on
2595 */
2596static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2597{
2598	int i;
2599
2600	ice_for_each_rxq(vsi, i) {
2601		struct ice_ring *rx_ring = vsi->rx_rings[i];
2602
2603		if (rx_ring->xsk_pool)
2604			napi_schedule(&rx_ring->q_vector->napi);
2605	}
2606}
2607
2608/**
2609 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2610 * @vsi: VSI to setup XDP for
2611 * @prog: XDP program
2612 * @extack: netlink extended ack
2613 */
2614static int
2615ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2616		   struct netlink_ext_ack *extack)
2617{
2618	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2619	bool if_running = netif_running(vsi->netdev);
2620	int ret = 0, xdp_ring_err = 0;
2621
2622	if (frame_size > vsi->rx_buf_len) {
2623		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2624		return -EOPNOTSUPP;
2625	}
2626
2627	/* need to stop netdev while setting up the program for Rx rings */
2628	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2629		ret = ice_down(vsi);
2630		if (ret) {
2631			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2632			return ret;
2633		}
2634	}
2635
2636	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2637		vsi->num_xdp_txq = vsi->alloc_rxq;
2638		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2639		if (xdp_ring_err)
2640			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2641	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2642		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2643		if (xdp_ring_err)
2644			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2645	} else {
2646		ice_vsi_assign_bpf_prog(vsi, prog);
2647	}
2648
2649	if (if_running)
2650		ret = ice_up(vsi);
2651
2652	if (!ret && prog)
2653		ice_vsi_rx_napi_schedule(vsi);
2654
2655	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2656}
2657
2658/**
2659 * ice_xdp_safe_mode - XDP handler for safe mode
2660 * @dev: netdevice
2661 * @xdp: XDP command
2662 */
2663static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2664			     struct netdev_bpf *xdp)
2665{
2666	NL_SET_ERR_MSG_MOD(xdp->extack,
2667			   "Please provide working DDP firmware package in order to use XDP\n"
2668			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2669	return -EOPNOTSUPP;
2670}
2671
2672/**
2673 * ice_xdp - implements XDP handler
2674 * @dev: netdevice
2675 * @xdp: XDP command
2676 */
2677static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2678{
2679	struct ice_netdev_priv *np = netdev_priv(dev);
2680	struct ice_vsi *vsi = np->vsi;
2681
2682	if (vsi->type != ICE_VSI_PF) {
2683		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2684		return -EINVAL;
2685	}
2686
2687	switch (xdp->command) {
2688	case XDP_SETUP_PROG:
2689		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2690	case XDP_SETUP_XSK_POOL:
2691		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2692					  xdp->xsk.queue_id);
2693	default:
2694		return -EINVAL;
2695	}
2696}
2697
2698/**
2699 * ice_ena_misc_vector - enable the non-queue interrupts
2700 * @pf: board private structure
2701 */
2702static void ice_ena_misc_vector(struct ice_pf *pf)
2703{
2704	struct ice_hw *hw = &pf->hw;
2705	u32 val;
2706
2707	/* Disable anti-spoof detection interrupt to prevent spurious event
2708	 * interrupts during a function reset. Anti-spoof functionally is
2709	 * still supported.
2710	 */
2711	val = rd32(hw, GL_MDCK_TX_TDPU);
2712	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2713	wr32(hw, GL_MDCK_TX_TDPU, val);
2714
2715	/* clear things first */
2716	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2717	rd32(hw, PFINT_OICR);		/* read to clear */
2718
2719	val = (PFINT_OICR_ECC_ERR_M |
2720	       PFINT_OICR_MAL_DETECT_M |
2721	       PFINT_OICR_GRST_M |
2722	       PFINT_OICR_PCI_EXCEPTION_M |
2723	       PFINT_OICR_VFLR_M |
2724	       PFINT_OICR_HMC_ERR_M |
2725	       PFINT_OICR_PE_PUSH_M |
2726	       PFINT_OICR_PE_CRITERR_M);
2727
2728	wr32(hw, PFINT_OICR_ENA, val);
2729
2730	/* SW_ITR_IDX = 0, but don't change INTENA */
2731	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2732	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2733}
2734
2735/**
2736 * ice_misc_intr - misc interrupt handler
2737 * @irq: interrupt number
2738 * @data: pointer to a q_vector
2739 */
2740static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2741{
2742	struct ice_pf *pf = (struct ice_pf *)data;
2743	struct ice_hw *hw = &pf->hw;
2744	irqreturn_t ret = IRQ_NONE;
2745	struct device *dev;
2746	u32 oicr, ena_mask;
2747
2748	dev = ice_pf_to_dev(pf);
2749	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2750	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2751	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2752
2753	oicr = rd32(hw, PFINT_OICR);
2754	ena_mask = rd32(hw, PFINT_OICR_ENA);
2755
2756	if (oicr & PFINT_OICR_SWINT_M) {
2757		ena_mask &= ~PFINT_OICR_SWINT_M;
2758		pf->sw_int_count++;
2759	}
2760
2761	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2762		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2763		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2764	}
2765	if (oicr & PFINT_OICR_VFLR_M) {
2766		/* disable any further VFLR event notifications */
2767		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2768			u32 reg = rd32(hw, PFINT_OICR_ENA);
2769
2770			reg &= ~PFINT_OICR_VFLR_M;
2771			wr32(hw, PFINT_OICR_ENA, reg);
2772		} else {
2773			ena_mask &= ~PFINT_OICR_VFLR_M;
2774			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2775		}
2776	}
2777
2778	if (oicr & PFINT_OICR_GRST_M) {
2779		u32 reset;
2780
2781		/* we have a reset warning */
2782		ena_mask &= ~PFINT_OICR_GRST_M;
2783		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2784			GLGEN_RSTAT_RESET_TYPE_S;
2785
2786		if (reset == ICE_RESET_CORER)
2787			pf->corer_count++;
2788		else if (reset == ICE_RESET_GLOBR)
2789			pf->globr_count++;
2790		else if (reset == ICE_RESET_EMPR)
2791			pf->empr_count++;
2792		else
2793			dev_dbg(dev, "Invalid reset type %d\n", reset);
2794
2795		/* If a reset cycle isn't already in progress, we set a bit in
2796		 * pf->state so that the service task can start a reset/rebuild.
2797		 */
2798		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2799			if (reset == ICE_RESET_CORER)
2800				set_bit(ICE_CORER_RECV, pf->state);
2801			else if (reset == ICE_RESET_GLOBR)
2802				set_bit(ICE_GLOBR_RECV, pf->state);
2803			else
2804				set_bit(ICE_EMPR_RECV, pf->state);
2805
2806			/* There are a couple of different bits at play here.
2807			 * hw->reset_ongoing indicates whether the hardware is
2808			 * in reset. This is set to true when a reset interrupt
2809			 * is received and set back to false after the driver
2810			 * has determined that the hardware is out of reset.
2811			 *
2812			 * ICE_RESET_OICR_RECV in pf->state indicates
2813			 * that a post reset rebuild is required before the
2814			 * driver is operational again. This is set above.
2815			 *
2816			 * As this is the start of the reset/rebuild cycle, set
2817			 * both to indicate that.
2818			 */
2819			hw->reset_ongoing = true;
2820		}
2821	}
2822
2823	if (oicr & PFINT_OICR_TSYN_TX_M) {
2824		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
2825		ice_ptp_process_ts(pf);
2826	}
2827
2828	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
2829		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2830		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
2831
2832		/* Save EVENTs from GTSYN register */
2833		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
2834						     GLTSYN_STAT_EVENT1_M |
2835						     GLTSYN_STAT_EVENT2_M);
2836		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
2837		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
2838	}
2839
2840#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2841	if (oicr & ICE_AUX_CRIT_ERR) {
2842		struct iidc_event *event;
2843
2844		ena_mask &= ~ICE_AUX_CRIT_ERR;
2845		event = kzalloc(sizeof(*event), GFP_KERNEL);
2846		if (event) {
2847			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2848			/* report the entire OICR value to AUX driver */
2849			event->reg = oicr;
2850			ice_send_event_to_aux(pf, event);
2851			kfree(event);
2852		}
2853	}
2854
2855	/* Report any remaining unexpected interrupts */
2856	oicr &= ena_mask;
2857	if (oicr) {
2858		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2859		/* If a critical error is pending there is no choice but to
2860		 * reset the device.
2861		 */
2862		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2863			    PFINT_OICR_ECC_ERR_M)) {
2864			set_bit(ICE_PFR_REQ, pf->state);
2865			ice_service_task_schedule(pf);
2866		}
2867	}
2868	ret = IRQ_HANDLED;
2869
2870	ice_service_task_schedule(pf);
2871	ice_irq_dynamic_ena(hw, NULL, NULL);
2872
2873	return ret;
2874}
2875
2876/**
2877 * ice_dis_ctrlq_interrupts - disable control queue interrupts
2878 * @hw: pointer to HW structure
2879 */
2880static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2881{
2882	/* disable Admin queue Interrupt causes */
2883	wr32(hw, PFINT_FW_CTL,
2884	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2885
2886	/* disable Mailbox queue Interrupt causes */
2887	wr32(hw, PFINT_MBX_CTL,
2888	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2889
2890	wr32(hw, PFINT_SB_CTL,
2891	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2892
2893	/* disable Control queue Interrupt causes */
2894	wr32(hw, PFINT_OICR_CTL,
2895	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2896
2897	ice_flush(hw);
2898}
2899
2900/**
2901 * ice_free_irq_msix_misc - Unroll misc vector setup
2902 * @pf: board private structure
2903 */
2904static void ice_free_irq_msix_misc(struct ice_pf *pf)
2905{
2906	struct ice_hw *hw = &pf->hw;
2907
2908	ice_dis_ctrlq_interrupts(hw);
2909
2910	/* disable OICR interrupt */
2911	wr32(hw, PFINT_OICR_ENA, 0);
2912	ice_flush(hw);
2913
2914	if (pf->msix_entries) {
2915		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2916		devm_free_irq(ice_pf_to_dev(pf),
2917			      pf->msix_entries[pf->oicr_idx].vector, pf);
2918	}
2919
2920	pf->num_avail_sw_msix += 1;
2921	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2922}
2923
2924/**
2925 * ice_ena_ctrlq_interrupts - enable control queue interrupts
2926 * @hw: pointer to HW structure
2927 * @reg_idx: HW vector index to associate the control queue interrupts with
2928 */
2929static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2930{
2931	u32 val;
2932
2933	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2934	       PFINT_OICR_CTL_CAUSE_ENA_M);
2935	wr32(hw, PFINT_OICR_CTL, val);
2936
2937	/* enable Admin queue Interrupt causes */
2938	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2939	       PFINT_FW_CTL_CAUSE_ENA_M);
2940	wr32(hw, PFINT_FW_CTL, val);
2941
2942	/* enable Mailbox queue Interrupt causes */
2943	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2944	       PFINT_MBX_CTL_CAUSE_ENA_M);
2945	wr32(hw, PFINT_MBX_CTL, val);
2946
2947	/* enable Sideband queue Interrupt causes */
2948	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
2949	       PFINT_SB_CTL_CAUSE_ENA_M);
2950	wr32(hw, PFINT_SB_CTL, val);
2951
2952	ice_flush(hw);
2953}
2954
2955/**
2956 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2957 * @pf: board private structure
2958 *
2959 * This sets up the handler for MSIX 0, which is used to manage the
2960 * non-queue interrupts, e.g. AdminQ and errors. This is not used
2961 * when in MSI or Legacy interrupt mode.
2962 */
2963static int ice_req_irq_msix_misc(struct ice_pf *pf)
2964{
2965	struct device *dev = ice_pf_to_dev(pf);
2966	struct ice_hw *hw = &pf->hw;
2967	int oicr_idx, err = 0;
2968
2969	if (!pf->int_name[0])
2970		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2971			 dev_driver_string(dev), dev_name(dev));
2972
2973	/* Do not request IRQ but do enable OICR interrupt since settings are
2974	 * lost during reset. Note that this function is called only during
2975	 * rebuild path and not while reset is in progress.
2976	 */
2977	if (ice_is_reset_in_progress(pf->state))
2978		goto skip_req_irq;
2979
2980	/* reserve one vector in irq_tracker for misc interrupts */
2981	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2982	if (oicr_idx < 0)
2983		return oicr_idx;
2984
2985	pf->num_avail_sw_msix -= 1;
2986	pf->oicr_idx = (u16)oicr_idx;
2987
2988	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2989			       ice_misc_intr, 0, pf->int_name, pf);
2990	if (err) {
2991		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2992			pf->int_name, err);
2993		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2994		pf->num_avail_sw_msix += 1;
2995		return err;
2996	}
2997
2998skip_req_irq:
2999	ice_ena_misc_vector(pf);
3000
3001	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3002	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3003	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3004
3005	ice_flush(hw);
3006	ice_irq_dynamic_ena(hw, NULL, NULL);
3007
3008	return 0;
3009}
3010
3011/**
3012 * ice_napi_add - register NAPI handler for the VSI
3013 * @vsi: VSI for which NAPI handler is to be registered
3014 *
3015 * This function is only called in the driver's load path. Registering the NAPI
3016 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3017 * reset/rebuild, etc.)
3018 */
3019static void ice_napi_add(struct ice_vsi *vsi)
3020{
3021	int v_idx;
3022
3023	if (!vsi->netdev)
3024		return;
3025
3026	ice_for_each_q_vector(vsi, v_idx)
3027		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3028			       ice_napi_poll, NAPI_POLL_WEIGHT);
3029}
3030
3031/**
3032 * ice_set_ops - set netdev and ethtools ops for the given netdev
3033 * @netdev: netdev instance
3034 */
3035static void ice_set_ops(struct net_device *netdev)
3036{
3037	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3038
3039	if (ice_is_safe_mode(pf)) {
3040		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3041		ice_set_ethtool_safe_mode_ops(netdev);
3042		return;
3043	}
3044
3045	netdev->netdev_ops = &ice_netdev_ops;
3046	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3047	ice_set_ethtool_ops(netdev);
3048}
3049
3050/**
3051 * ice_set_netdev_features - set features for the given netdev
3052 * @netdev: netdev instance
3053 */
3054static void ice_set_netdev_features(struct net_device *netdev)
3055{
3056	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3057	netdev_features_t csumo_features;
3058	netdev_features_t vlano_features;
3059	netdev_features_t dflt_features;
3060	netdev_features_t tso_features;
3061
3062	if (ice_is_safe_mode(pf)) {
3063		/* safe mode */
3064		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3065		netdev->hw_features = netdev->features;
3066		return;
3067	}
3068
3069	dflt_features = NETIF_F_SG	|
3070			NETIF_F_HIGHDMA	|
3071			NETIF_F_NTUPLE	|
3072			NETIF_F_RXHASH;
3073
3074	csumo_features = NETIF_F_RXCSUM	  |
3075			 NETIF_F_IP_CSUM  |
3076			 NETIF_F_SCTP_CRC |
3077			 NETIF_F_IPV6_CSUM;
3078
3079	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3080			 NETIF_F_HW_VLAN_CTAG_TX     |
3081			 NETIF_F_HW_VLAN_CTAG_RX;
3082
3083	tso_features = NETIF_F_TSO			|
3084		       NETIF_F_TSO_ECN			|
3085		       NETIF_F_TSO6			|
3086		       NETIF_F_GSO_GRE			|
3087		       NETIF_F_GSO_UDP_TUNNEL		|
3088		       NETIF_F_GSO_GRE_CSUM		|
3089		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3090		       NETIF_F_GSO_PARTIAL		|
3091		       NETIF_F_GSO_IPXIP4		|
3092		       NETIF_F_GSO_IPXIP6		|
3093		       NETIF_F_GSO_UDP_L4;
3094
3095	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3096					NETIF_F_GSO_GRE_CSUM;
3097	/* set features that user can change */
3098	netdev->hw_features = dflt_features | csumo_features |
3099			      vlano_features | tso_features;
3100
3101	/* add support for HW_CSUM on packets with MPLS header */
3102	netdev->mpls_features =  NETIF_F_HW_CSUM;
3103
3104	/* enable features */
3105	netdev->features |= netdev->hw_features;
3106	/* encap and VLAN devices inherit default, csumo and tso features */
3107	netdev->hw_enc_features |= dflt_features | csumo_features |
3108				   tso_features;
3109	netdev->vlan_features |= dflt_features | csumo_features |
3110				 tso_features;
3111}
3112
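/* Illustrative effect, not part of the driver: each flag placed in
 * netdev->hw_features above becomes user-toggleable, e.g. (assuming an
 * interface named eth0):
 *
 *	ethtool -K eth0 rx-vlan-filter off	// NETIF_F_HW_VLAN_CTAG_FILTER
 *
 * whereas in safe mode only SG and HIGHDMA are advertised and no
 * offloads can be enabled.
 */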
3113/**
3114 * ice_cfg_netdev - Allocate, configure and register a netdev
3115 * @vsi: the VSI associated with the new netdev
3116 *
3117 * Returns 0 on success, negative value on failure
3118 */
3119static int ice_cfg_netdev(struct ice_vsi *vsi)
3120{
3121	struct ice_netdev_priv *np;
3122	struct net_device *netdev;
3123	u8 mac_addr[ETH_ALEN];
3124
3125	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3126				    vsi->alloc_rxq);
3127	if (!netdev)
3128		return -ENOMEM;
3129
3130	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3131	vsi->netdev = netdev;
3132	np = netdev_priv(netdev);
3133	np->vsi = vsi;
3134
3135	ice_set_netdev_features(netdev);
3136
3137	ice_set_ops(netdev);
3138
3139	if (vsi->type == ICE_VSI_PF) {
3140		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3141		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3142		ether_addr_copy(netdev->dev_addr, mac_addr);
3143		ether_addr_copy(netdev->perm_addr, mac_addr);
3144	}
3145
3146	netdev->priv_flags |= IFF_UNICAST_FLT;
3147
3148	/* Setup netdev TC information */
3149	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3150
3151	/* setup watchdog timeout value to be 5 second */
3152	netdev->watchdog_timeo = 5 * HZ;
3153
3154	netdev->min_mtu = ETH_MIN_MTU;
3155	netdev->max_mtu = ICE_MAX_MTU;
3156
3157	return 0;
3158}
3159
3160/**
3161 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3162 * @lut: Lookup table
3163 * @rss_table_size: Lookup table size
3164 * @rss_size: Range of queue number for hashing
3165 */
3166void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3167{
3168	u16 i;
3169
3170	for (i = 0; i < rss_table_size; i++)
3171		lut[i] = i % rss_size;
3172}
3173
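/* Illustrative sketch, not part of the driver: the LUT spreads queue
 * indices round-robin, so rss_table_size = 8 and rss_size = 3 yield
 * { 0, 1, 2, 0, 1, 2, 0, 1 }:
 *
 *	u8 lut[8];
 *
 *	ice_fill_rss_lut(lut, ARRAY_SIZE(lut), 3);	// lut[i] == i % 3
 */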
3174/**
3175 * ice_pf_vsi_setup - Set up a PF VSI
3176 * @pf: board private structure
3177 * @pi: pointer to the port_info instance
3178 *
3179 * Returns pointer to the successfully allocated VSI software struct
3180 * on success, otherwise returns NULL on failure.
3181 */
3182static struct ice_vsi *
3183ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3184{
3185	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3186}
3187
3188/**
3189 * ice_ctrl_vsi_setup - Set up a control VSI
3190 * @pf: board private structure
3191 * @pi: pointer to the port_info instance
3192 *
3193 * Returns pointer to the successfully allocated VSI software struct
3194 * on success, otherwise returns NULL on failure.
3195 */
3196static struct ice_vsi *
3197ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3198{
3199	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3200}
3201
3202/**
3203 * ice_lb_vsi_setup - Set up a loopback VSI
3204 * @pf: board private structure
3205 * @pi: pointer to the port_info instance
3206 *
3207 * Returns pointer to the successfully allocated VSI software struct
3208 * on success, otherwise returns NULL on failure.
3209 */
3210struct ice_vsi *
3211ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3212{
3213	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3214}
3215
3216/**
3217 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3218 * @netdev: network interface to be adjusted
3219 * @proto: unused protocol
3220 * @vid: VLAN ID to be added
3221 *
3222 * net_device_ops implementation for adding VLAN IDs
3223 */
3224static int
3225ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3226		    u16 vid)
3227{
3228	struct ice_netdev_priv *np = netdev_priv(netdev);
3229	struct ice_vsi *vsi = np->vsi;
3230	int ret;
3231
3232	/* VLAN 0 is added by default during load/reset */
3233	if (!vid)
3234		return 0;
3235
3236	/* Enable VLAN pruning when a VLAN other than 0 is added */
3237	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3238		ret = ice_cfg_vlan_pruning(vsi, true, false);
3239		if (ret)
3240			return ret;
3241	}
3242
3243	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3244	 * packets aren't pruned by the device's internal switch on Rx
3245	 */
3246	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3247	if (!ret)
3248		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3249
3250	return ret;
3251}
3252
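/* Illustrative trigger, not part of the driver: this handler is wired
 * up as .ndo_vlan_rx_add_vid in ice_netdev_ops, so creating an 802.1Q
 * upper device (assuming an interface named eth0):
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *
 * reaches ice_vlan_rx_add_vid() with vid = 100.
 */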
3253/**
3254 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3255 * @netdev: network interface to be adjusted
3256 * @proto: unused protocol
3257 * @vid: VLAN ID to be removed
3258 *
3259 * net_device_ops implementation for removing VLAN IDs
3260 */
3261static int
3262ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3263		     u16 vid)
3264{
3265	struct ice_netdev_priv *np = netdev_priv(netdev);
3266	struct ice_vsi *vsi = np->vsi;
3267	int ret;
3268
3269	/* don't allow removal of VLAN 0 */
3270	if (!vid)
3271		return 0;
3272
3273	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3274	 * information
3275	 */
3276	ret = ice_vsi_kill_vlan(vsi, vid);
3277	if (ret)
3278		return ret;
3279
3280	/* Disable pruning when VLAN 0 is the only VLAN rule */
3281	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3282		ret = ice_cfg_vlan_pruning(vsi, false, false);
3283
3284	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3285	return ret;
3286}
3287
3288/**
3289 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3290 * @pf: board private structure
3291 *
3292 * Returns 0 on success, negative value on failure
3293 */
3294static int ice_setup_pf_sw(struct ice_pf *pf)
3295{
3296	struct ice_vsi *vsi;
3297	int status = 0;
3298
3299	if (ice_is_reset_in_progress(pf->state))
3300		return -EBUSY;
3301
3302	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3303	if (!vsi)
3304		return -ENOMEM;
3305
3306	status = ice_cfg_netdev(vsi);
3307	if (status) {
3308		status = -ENODEV;
3309		goto unroll_vsi_setup;
3310	}
3311	/* netdev has to be configured before setting frame size */
3312	ice_vsi_cfg_frame_size(vsi);
3313
3314	/* Setup DCB netlink interface */
3315	ice_dcbnl_setup(vsi);
3316
3317	/* registering the NAPI handler requires both the queues and
3318	 * netdev to be created, which are done in ice_pf_vsi_setup()
3319	 * and ice_cfg_netdev() respectively
3320	 */
3321	ice_napi_add(vsi);
3322
3323	status = ice_set_cpu_rx_rmap(vsi);
3324	if (status) {
3325		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3326			vsi->vsi_num, status);
3327		status = -EINVAL;
3328		goto unroll_napi_add;
3329	}
3330	status = ice_init_mac_fltr(pf);
3331	if (status)
3332		goto free_cpu_rx_map;
3333
3334	return status;
3335
3336free_cpu_rx_map:
3337	ice_free_cpu_rx_rmap(vsi);
3338
3339unroll_napi_add:
3340	if (vsi) {
3341		ice_napi_del(vsi);
3342		if (vsi->netdev) {
3343			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3344			free_netdev(vsi->netdev);
3345			vsi->netdev = NULL;
3346		}
3347	}
3348
3349unroll_vsi_setup:
3350	ice_vsi_release(vsi);
3351	return status;
3352}
3353
3354/**
3355 * ice_get_avail_q_count - Get count of available queues
3356 * @pf_qmap: bitmap of in-use queues; clear bits count as available
3357 * @lock: pointer to a mutex that protects access to pf_qmap
3358 * @size: size of the bitmap
3359 */
3360static u16
3361ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3362{
3363	unsigned long bit;
3364	u16 count = 0;
3365
3366	mutex_lock(lock);
3367	for_each_clear_bit(bit, pf_qmap, size)
3368		count++;
3369	mutex_unlock(lock);
3370
3371	return count;
3372}
3373
3374/**
3375 * ice_get_avail_txq_count - Get count of available Tx queues
3376 * @pf: pointer to an ice_pf instance
3377 */
3378u16 ice_get_avail_txq_count(struct ice_pf *pf)
3379{
3380	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3381				     pf->max_pf_txqs);
3382}
3383
3384/**
3385 * ice_get_avail_rxq_count - Get count of available Rx queues
3386 * @pf: pointer to an ice_pf instance
3387 */
3388u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3389{
3390	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3391				     pf->max_pf_rxqs);
3392}
3393
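/* Worked example with hypothetical values: if max_pf_txqs = 256 and 16
 * bits are set in avail_txqs, ice_get_avail_txq_count() walks the clear
 * bits under avail_q_mutex and returns 240, the number of Tx queues
 * still free for allocation.
 */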
3394/**
3395 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3396 * @pf: board private structure to initialize
3397 */
3398static void ice_deinit_pf(struct ice_pf *pf)
3399{
3400	ice_service_task_stop(pf);
3401	mutex_destroy(&pf->sw_mutex);
3402	mutex_destroy(&pf->tc_mutex);
3403	mutex_destroy(&pf->avail_q_mutex);
3404
3405	if (pf->avail_txqs) {
3406		bitmap_free(pf->avail_txqs);
3407		pf->avail_txqs = NULL;
3408	}
3409
3410	if (pf->avail_rxqs) {
3411		bitmap_free(pf->avail_rxqs);
3412		pf->avail_rxqs = NULL;
3413	}
3414
3415	if (pf->ptp.clock)
3416		ptp_clock_unregister(pf->ptp.clock);
3417}
3418
3419/**
3420 * ice_set_pf_caps - set PFs capability flags
3421 * @pf: pointer to the PF instance
3422 */
3423static void ice_set_pf_caps(struct ice_pf *pf)
3424{
3425	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3426
3427	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3428	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3429	if (func_caps->common_cap.rdma) {
3430		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3431		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3432	}
3433	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3434	if (func_caps->common_cap.dcb)
3435		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3436	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3437	if (func_caps->common_cap.sr_iov_1_1) {
3438		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3439		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3440					      ICE_MAX_VF_COUNT);
3441	}
3442	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3443	if (func_caps->common_cap.rss_table_size)
3444		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3445
3446	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3447	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3448		u16 unused;
3449
3450		/* ctrl_vsi_idx will be set to a valid value when flow director
3451		 * is setup by ice_init_fdir
3452		 */
3453		pf->ctrl_vsi_idx = ICE_NO_VSI;
3454		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3455		/* force guaranteed filter pool for PF */
3456		ice_alloc_fd_guar_item(&pf->hw, &unused,
3457				       func_caps->fd_fltr_guar);
3458		/* force shared filter pool for PF */
3459		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3460				       func_caps->fd_fltr_best_effort);
3461	}
3462
3463	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3464	if (func_caps->common_cap.ieee_1588)
3465		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3466
3467	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3468	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3469}
3470
3471/**
3472 * ice_init_pf - Initialize general software structures (struct ice_pf)
3473 * @pf: board private structure to initialize
3474 */
3475static int ice_init_pf(struct ice_pf *pf)
3476{
3477	ice_set_pf_caps(pf);
3478
3479	mutex_init(&pf->sw_mutex);
3480	mutex_init(&pf->tc_mutex);
3481
3482	INIT_HLIST_HEAD(&pf->aq_wait_list);
3483	spin_lock_init(&pf->aq_wait_lock);
3484	init_waitqueue_head(&pf->aq_wait_queue);
3485
3486	init_waitqueue_head(&pf->reset_wait_queue);
3487
3488	/* setup service timer and periodic service task */
3489	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3490	pf->serv_tmr_period = HZ;
3491	INIT_WORK(&pf->serv_task, ice_service_task);
3492	clear_bit(ICE_SERVICE_SCHED, pf->state);
3493
3494	mutex_init(&pf->avail_q_mutex);
3495	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3496	if (!pf->avail_txqs)
3497		return -ENOMEM;
3498
3499	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3500	if (!pf->avail_rxqs) {
3501		bitmap_free(pf->avail_txqs);	/* allocated with bitmap_zalloc() */
3502		pf->avail_txqs = NULL;
3503		return -ENOMEM;
3504	}
3505
3506	return 0;
3507}
3508
3509/**
3510 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3511 * @pf: board private structure
3512 *
3513 * Compute the number of MSI-X vectors required (v_budget) and request them
3514 * from the OS. Returns the number of vectors reserved, or negative on failure.
3515 */
3516static int ice_ena_msix_range(struct ice_pf *pf)
3517{
3518	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3519	struct device *dev = ice_pf_to_dev(pf);
3520	int needed, err, i;
3521
3522	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3523	num_cpus = num_online_cpus();
3524
3525	/* reserve for LAN miscellaneous handler */
3526	needed = ICE_MIN_LAN_OICR_MSIX;
3527	if (v_left < needed)
3528		goto no_hw_vecs_left_err;
3529	v_budget += needed;
3530	v_left -= needed;
3531
3532	/* reserve for flow director */
3533	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3534		needed = ICE_FDIR_MSIX;
3535		if (v_left < needed)
3536			goto no_hw_vecs_left_err;
3537		v_budget += needed;
3538		v_left -= needed;
3539	}
3540
3541	/* total used for non-traffic vectors */
3542	v_other = v_budget;
3543
3544	/* reserve vectors for LAN traffic */
3545	needed = num_cpus;
3546	if (v_left < needed)
3547		goto no_hw_vecs_left_err;
3548	pf->num_lan_msix = needed;
3549	v_budget += needed;
3550	v_left -= needed;
3551
3552	/* reserve vectors for RDMA auxiliary driver */
3553	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3554		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3555		if (v_left < needed)
3556			goto no_hw_vecs_left_err;
3557		pf->num_rdma_msix = needed;
3558		v_budget += needed;
3559		v_left -= needed;
3560	}
3561
3562	pf->msix_entries = devm_kcalloc(dev, v_budget,
3563					sizeof(*pf->msix_entries), GFP_KERNEL);
3564	if (!pf->msix_entries) {
3565		err = -ENOMEM;
3566		goto exit_err;
3567	}
3568
3569	for (i = 0; i < v_budget; i++)
3570		pf->msix_entries[i].entry = i;
3571
3572	/* actually reserve the vectors */
3573	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3574					 ICE_MIN_MSIX, v_budget);
3575	if (v_actual < 0) {
3576		dev_err(dev, "unable to reserve MSI-X vectors\n");
3577		err = v_actual;
3578		goto msix_err;
3579	}
3580
3581	if (v_actual < v_budget) {
3582		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3583			 v_budget, v_actual);
3584
3585		if (v_actual < ICE_MIN_MSIX) {
3586			/* error if we can't get minimum vectors */
3587			pci_disable_msix(pf->pdev);
3588			err = -ERANGE;
3589			goto msix_err;
3590		} else {
3591			int v_remain = v_actual - v_other;
3592			int v_rdma = 0, v_min_rdma = 0;
3593
3594			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3595				/* Need at least 1 interrupt in addition to
3596				 * AEQ MSIX
3597				 */
3598				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3599				v_min_rdma = ICE_MIN_RDMA_MSIX;
3600			}
3601
3602			if (v_actual == ICE_MIN_MSIX ||
3603			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3604				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3605				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3606
3607				pf->num_rdma_msix = 0;
3608				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3609			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3610				   (v_remain - v_rdma < v_rdma)) {
3611				/* Support minimum RDMA and give remaining
3612				 * vectors to LAN MSIX
3613				 */
3614				pf->num_rdma_msix = v_min_rdma;
3615				pf->num_lan_msix = v_remain - v_min_rdma;
3616			} else {
3617				/* Split remaining MSIX with RDMA after
3618				 * accounting for AEQ MSIX
3619				 */
3620				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3621						    ICE_RDMA_NUM_AEQ_MSIX;
3622				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3623			}
3624
3625			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3626				   pf->num_lan_msix);
3627
3628			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3629				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3630					   pf->num_rdma_msix);
3631		}
3632	}
3633
3634	return v_actual;
3635
3636msix_err:
3637	devm_kfree(dev, pf->msix_entries);
3638	goto exit_err;
3639
3640no_hw_vecs_left_err:
3641	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3642		needed, v_left);
3643	err = -ERANGE;
3644exit_err:
3645	pf->num_rdma_msix = 0;
3646	pf->num_lan_msix = 0;
3647	return err;
3648}
3649
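/* Worked example using only the symbolic constants above: with flow
 * director and RDMA enabled on an 8-CPU system, the full request is
 *
 *	v_budget = ICE_MIN_LAN_OICR_MSIX	// misc/OICR vector
 *		 + ICE_FDIR_MSIX		// flow director
 *		 + 8				// one LAN vector per CPU
 *		 + 8 + ICE_RDMA_NUM_AEQ_MSIX;	// RDMA per-CPU + AEQ
 *
 * pci_enable_msix_range() then grants between ICE_MIN_MSIX and v_budget
 * vectors; if fewer than v_budget are granted, the redistribution logic
 * above shrinks the LAN and RDMA shares accordingly.
 */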
3650/**
3651 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3652 * @pf: board private structure
3653 */
3654static void ice_dis_msix(struct ice_pf *pf)
3655{
3656	pci_disable_msix(pf->pdev);
3657	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3658	pf->msix_entries = NULL;
3659}
3660
3661/**
3662 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3663 * @pf: board private structure
3664 */
3665static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3666{
3667	ice_dis_msix(pf);
3668
3669	if (pf->irq_tracker) {
3670		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3671		pf->irq_tracker = NULL;
3672	}
3673}
3674
3675/**
3676 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3677 * @pf: board private structure to initialize
3678 */
3679static int ice_init_interrupt_scheme(struct ice_pf *pf)
3680{
3681	int vectors;
3682
3683	vectors = ice_ena_msix_range(pf);
3684
3685	if (vectors < 0)
3686		return vectors;
3687
3688	/* set up vector assignment tracking */
3689	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3690				       struct_size(pf->irq_tracker, list, vectors),
3691				       GFP_KERNEL);
3692	if (!pf->irq_tracker) {
3693		ice_dis_msix(pf);
3694		return -ENOMEM;
3695	}
3696
3697	/* populate SW interrupts pool with number of OS granted IRQs. */
3698	pf->num_avail_sw_msix = (u16)vectors;
3699	pf->irq_tracker->num_entries = (u16)vectors;
3700	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3701
3702	return 0;
3703}
3704
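/* Illustrative accounting, not part of the driver: if
 * ice_ena_msix_range() returns 12, the tracker is sized for 12 entries
 * and num_avail_sw_msix starts at 12; ice_get_res()/ice_free_res() then
 * draw from and replenish that pool, as seen for the misc vector in
 * ice_req_irq_msix_misc() above.
 */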
3705/**
3706 * ice_is_wol_supported - check if WoL is supported
3707 * @hw: pointer to hardware info
3708 *
3709 * Check if WoL is supported based on the HW configuration.
3710 * Returns true if the NVM supports and enables WoL for this port, false otherwise
3711 */
3712bool ice_is_wol_supported(struct ice_hw *hw)
3713{
3714	u16 wol_ctrl;
3715
3716	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3717	 * word) indicates WoL is not supported on the corresponding PF ID.
3718	 */
3719	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3720		return false;
3721
3722	return !(BIT(hw->port_info->lport) & wol_ctrl);
3723}
3724
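/* Worked example with a hypothetical NVM word: for lport 2 and
 * wol_ctrl = 0x0004, BIT(2) & wol_ctrl is non-zero, so WoL is reported
 * unsupported; wol_ctrl = 0x0000 would report WoL supported on every
 * port.
 */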
3725/**
3726 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3727 * @vsi: VSI being changed
3728 * @new_rx: new number of Rx queues
3729 * @new_tx: new number of Tx queues
3730 *
3731 * Only change the number of queues if new_tx or new_rx is non-zero.
3732 *
3733 * Returns 0 on success.
3734 */
3735int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3736{
3737	struct ice_pf *pf = vsi->back;
3738	int err = 0, timeout = 50;
3739
3740	if (!new_rx && !new_tx)
3741		return -EINVAL;
3742
3743	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3744		timeout--;
3745		if (!timeout)
3746			return -EBUSY;
3747		usleep_range(1000, 2000);
3748	}
3749
3750	if (new_tx)
3751		vsi->req_txq = (u16)new_tx;
3752	if (new_rx)
3753		vsi->req_rxq = (u16)new_rx;
3754
3755	/* set for the next time the netdev is started */
3756	if (!netif_running(vsi->netdev)) {
3757		ice_vsi_rebuild(vsi, false);
3758		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3759		goto done;
3760	}
3761
3762	ice_vsi_close(vsi);
3763	ice_vsi_rebuild(vsi, false);
3764	ice_pf_dcb_recfg(pf);
3765	ice_vsi_open(vsi);
3766done:
3767	clear_bit(ICE_CFG_BUSY, pf->state);
3768	return err;
3769}
3770
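/* Illustrative usage, e.g. from an ethtool set_channels handler (the
 * exact call site is driver-internal): request 16 Tx queues and leave
 * the Rx count untouched by passing 0 for new_rx:
 *
 *	err = ice_vsi_recfg_qs(vsi, 0, 16);
 */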
3771/**
3772 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3773 * @pf: PF to configure
3774 *
3775 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3776 * VSI can still Tx/Rx VLAN tagged packets.
3777 */
3778static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3779{
3780	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3781	struct ice_vsi_ctx *ctxt;
3782	enum ice_status status;
3783	struct ice_hw *hw;
3784
3785	if (!vsi)
3786		return;
3787
3788	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3789	if (!ctxt)
3790		return;
3791
3792	hw = &pf->hw;
3793	ctxt->info = vsi->info;
3794
3795	ctxt->info.valid_sections =
3796		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3797			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3798			    ICE_AQ_VSI_PROP_SW_VALID);
3799
3800	/* disable VLAN anti-spoof */
3801	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3802				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3803
3804	/* disable VLAN pruning and keep all other settings */
3805	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3806
3807	/* allow all VLANs on Tx and don't strip on Rx */
3808	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3809		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3810
3811	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3812	if (status) {
3813		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3814			ice_stat_str(status),
3815			ice_aq_str(hw->adminq.sq_last_status));
3816	} else {
3817		vsi->info.sec_flags = ctxt->info.sec_flags;
3818		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3819		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3820	}
3821
3822	kfree(ctxt);
3823}
3824
3825/**
3826 * ice_log_pkg_init - log result of DDP package load
3827 * @hw: pointer to hardware info
3828 * @status: status of package load
3829 */
3830static void
3831ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3832{
3833	struct ice_pf *pf = (struct ice_pf *)hw->back;
3834	struct device *dev = ice_pf_to_dev(pf);
3835
3836	switch (*status) {
3837	case ICE_SUCCESS:
3838		/* The package download AdminQ command returned success because
3839		 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
3840		 * already a package loaded on the device.
3841		 */
3842		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3843		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3844		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3845		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3846		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3847			    sizeof(hw->pkg_name))) {
3848			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3849				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3850					 hw->active_pkg_name,
3851					 hw->active_pkg_ver.major,
3852					 hw->active_pkg_ver.minor,
3853					 hw->active_pkg_ver.update,
3854					 hw->active_pkg_ver.draft);
3855			else
3856				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3857					 hw->active_pkg_name,
3858					 hw->active_pkg_ver.major,
3859					 hw->active_pkg_ver.minor,
3860					 hw->active_pkg_ver.update,
3861					 hw->active_pkg_ver.draft);
3862		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3863			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3864			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3865				hw->active_pkg_name,
3866				hw->active_pkg_ver.major,
3867				hw->active_pkg_ver.minor,
3868				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3869			*status = ICE_ERR_NOT_SUPPORTED;
3870		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3871			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3872			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3873				 hw->active_pkg_name,
3874				 hw->active_pkg_ver.major,
3875				 hw->active_pkg_ver.minor,
3876				 hw->active_pkg_ver.update,
3877				 hw->active_pkg_ver.draft,
3878				 hw->pkg_name,
3879				 hw->pkg_ver.major,
3880				 hw->pkg_ver.minor,
3881				 hw->pkg_ver.update,
3882				 hw->pkg_ver.draft);
3883		} else {
3884			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3885			*status = ICE_ERR_NOT_SUPPORTED;
3886		}
3887		break;
3888	case ICE_ERR_FW_DDP_MISMATCH:
3889		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3890		break;
3891	case ICE_ERR_BUF_TOO_SHORT:
3892	case ICE_ERR_CFG:
3893		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3894		break;
3895	case ICE_ERR_NOT_SUPPORTED:
3896		/* Package File version not supported */
3897		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3898		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3899		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3900			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3901		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3902			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3903			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3904			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3905				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3906		break;
3907	case ICE_ERR_AQ_ERROR:
3908		switch (hw->pkg_dwnld_status) {
3909		case ICE_AQ_RC_ENOSEC:
3910		case ICE_AQ_RC_EBADSIG:
3911			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3912			return;
3913		case ICE_AQ_RC_ESVN:
3914			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3915			return;
3916		case ICE_AQ_RC_EBADMAN:
3917		case ICE_AQ_RC_EBADBUF:
3918			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3919			/* poll for reset to complete */
3920			if (ice_check_reset(hw))
3921				dev_err(dev, "Error resetting device. Please reload the driver\n");
3922			return;
3923		default:
3924			break;
3925		}
3926		fallthrough;
3927	default:
3928		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3929			*status);
3930		break;
3931	}
3932}
3933
3934/**
3935 * ice_load_pkg - load/reload the DDP Package file
3936 * @firmware: firmware structure when firmware was requested, or NULL for reload
3937 * @pf: pointer to the PF instance
3938 *
3939 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3940 * initialize HW tables.
3941 */
3942static void
3943ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3944{
3945	enum ice_status status = ICE_ERR_PARAM;
3946	struct device *dev = ice_pf_to_dev(pf);
3947	struct ice_hw *hw = &pf->hw;
3948
3949	/* Load DDP Package */
3950	if (firmware && !hw->pkg_copy) {
3951		status = ice_copy_and_init_pkg(hw, firmware->data,
3952					       firmware->size);
3953		ice_log_pkg_init(hw, &status);
3954	} else if (!firmware && hw->pkg_copy) {
3955		/* Reload package during rebuild after CORER/GLOBR reset */
3956		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3957		ice_log_pkg_init(hw, &status);
3958	} else {
3959		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3960	}
3961
3962	if (status) {
3963		/* Safe Mode */
3964		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3965		return;
3966	}
3967
3968	/* Successful download package is the precondition for advanced
3969	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
3970	 */
3971	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3972}
3973
3974/**
3975 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3976 * @pf: pointer to the PF structure
3977 *
3978 * There is no error returned here because the driver should be able to handle
3979 * 128 Byte cache lines, so we only print a warning in case issues are seen,
3980 * specifically with Tx.
3981 */
3982static void ice_verify_cacheline_size(struct ice_pf *pf)
3983{
3984	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3985		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3986			 ICE_CACHE_LINE_BYTES);
3987}
3988
3989/**
3990 * ice_send_version - update firmware with driver version
3991 * @pf: PF struct
3992 *
3993 * Returns ICE_SUCCESS on success, else error code
3994 */
3995static enum ice_status ice_send_version(struct ice_pf *pf)
3996{
3997	struct ice_driver_ver dv;
3998
3999	dv.major_ver = 0xff;
4000	dv.minor_ver = 0xff;
4001	dv.build_ver = 0xff;
4002	dv.subbuild_ver = 0;
4003	strscpy((char *)dv.driver_string, UTS_RELEASE,
4004		sizeof(dv.driver_string));
4005	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4006}
4007
4008/**
4009 * ice_init_fdir - Initialize flow director VSI and configuration
4010 * @pf: pointer to the PF instance
4011 *
4012 * returns 0 on success, negative on error
4013 */
4014static int ice_init_fdir(struct ice_pf *pf)
4015{
4016	struct device *dev = ice_pf_to_dev(pf);
4017	struct ice_vsi *ctrl_vsi;
4018	int err;
4019
4020	/* Side Band Flow Director needs to have a control VSI.
4021	 * Allocate it and store it in the PF.
4022	 */
4023	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4024	if (!ctrl_vsi) {
4025		dev_dbg(dev, "could not create control VSI\n");
4026		return -ENOMEM;
4027	}
4028
4029	err = ice_vsi_open_ctrl(ctrl_vsi);
4030	if (err) {
4031		dev_dbg(dev, "could not open control VSI\n");
4032		goto err_vsi_open;
4033	}
4034
4035	mutex_init(&pf->hw.fdir_fltr_lock);
4036
4037	err = ice_fdir_create_dflt_rules(pf);
4038	if (err)
4039		goto err_fdir_rule;
4040
4041	return 0;
4042
4043err_fdir_rule:
4044	ice_fdir_release_flows(&pf->hw);
4045	ice_vsi_close(ctrl_vsi);
4046err_vsi_open:
4047	ice_vsi_release(ctrl_vsi);
4048	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4049		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4050		pf->ctrl_vsi_idx = ICE_NO_VSI;
4051	}
4052	return err;
4053}
4054
4055/**
4056 * ice_get_opt_fw_name - return optional firmware file name or NULL
4057 * @pf: pointer to the PF instance
4058 */
4059static char *ice_get_opt_fw_name(struct ice_pf *pf)
4060{
4061	/* Optional firmware name is the same as the default, with an additional
4062	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4063	 */
4064	struct pci_dev *pdev = pf->pdev;
4065	char *opt_fw_filename;
4066	u64 dsn;
4067
4068	/* Determine the name of the optional file using the DSN (two
4069	 * dwords following the start of the DSN Capability).
4070	 */
4071	dsn = pci_get_dsn(pdev);
4072	if (!dsn)
4073		return NULL;
4074
4075	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4076	if (!opt_fw_filename)
4077		return NULL;
4078
4079	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4080		 ICE_DDP_PKG_PATH, dsn);
4081
4082	return opt_fw_filename;
4083}
4084
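/* Illustrative result with a hypothetical DSN: a device whose PCIe
 * serial number reads 0x0123456789abcdef will look for
 *
 *	intel/ice/ddp/ice-0123456789abcdef.pkg
 *
 * in the firmware search path before ice_request_fw() falls back to the
 * default ICE_DDP_PKG_FILE.
 */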
4085/**
4086 * ice_request_fw - request and load the DDP package file
4087 * @pf: pointer to the PF instance
4088 */
4089static void ice_request_fw(struct ice_pf *pf)
4090{
4091	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4092	const struct firmware *firmware = NULL;
4093	struct device *dev = ice_pf_to_dev(pf);
4094	int err = 0;
4095
4096	/* optional device-specific DDP (if present) overrides the default DDP
4097	 * package file. The kernel logs a debug message if the file doesn't
4098	 * exist, and warning messages for other errors.
4099	 */
4100	if (opt_fw_filename) {
4101		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4102		if (err) {
4103			kfree(opt_fw_filename);
4104			goto dflt_pkg_load;
4105		}
4106
4107		/* request for firmware was successful. Download to device */
4108		ice_load_pkg(firmware, pf);
4109		kfree(opt_fw_filename);
4110		release_firmware(firmware);
4111		return;
4112	}
4113
4114dflt_pkg_load:
4115	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4116	if (err) {
4117		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4118		return;
4119	}
4120
4121	/* request for firmware was successful. Download to device */
4122	ice_load_pkg(firmware, pf);
4123	release_firmware(firmware);
4124}
4125
4126/**
4127 * ice_print_wake_reason - show the wake up cause in the log
4128 * @pf: pointer to the PF struct
4129 */
4130static void ice_print_wake_reason(struct ice_pf *pf)
4131{
4132	u32 wus = pf->wakeup_reason;
4133	const char *wake_str;
4134
4135	/* if no wake event, nothing to print */
4136	if (!wus)
4137		return;
4138
4139	if (wus & PFPM_WUS_LNKC_M)
4140		wake_str = "Link\n";
4141	else if (wus & PFPM_WUS_MAG_M)
4142		wake_str = "Magic Packet\n";
4143	else if (wus & PFPM_WUS_MNG_M)
4144		wake_str = "Management\n";
4145	else if (wus & PFPM_WUS_FW_RST_WK_M)
4146		wake_str = "Firmware Reset\n";
4147	else
4148		wake_str = "Unknown\n";
4149
4150	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4151}
4152
4153/**
4154 * ice_register_netdev - register netdev and devlink port
4155 * @pf: pointer to the PF struct
4156 */
4157static int ice_register_netdev(struct ice_pf *pf)
4158{
4159	struct ice_vsi *vsi;
4160	int err = 0;
4161
4162	vsi = ice_get_main_vsi(pf);
4163	if (!vsi || !vsi->netdev)
4164		return -EIO;
4165
4166	err = register_netdev(vsi->netdev);
4167	if (err)
4168		goto err_register_netdev;
4169
4170	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4171	netif_carrier_off(vsi->netdev);
4172	netif_tx_stop_all_queues(vsi->netdev);
4173	err = ice_devlink_create_port(vsi);
4174	if (err)
4175		goto err_devlink_create;
4176
4177	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
4178
4179	return 0;
4180err_devlink_create:
4181	unregister_netdev(vsi->netdev);
4182	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4183err_register_netdev:
4184	free_netdev(vsi->netdev);
4185	vsi->netdev = NULL;
4186	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4187	return err;
4188}
4189
4190/**
4191 * ice_probe - Device initialization routine
4192 * @pdev: PCI device information struct
4193 * @ent: entry in ice_pci_tbl
4194 *
4195 * Returns 0 on success, negative on failure
4196 */
4197static int
4198ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4199{
4200	struct device *dev = &pdev->dev;
4201	struct ice_pf *pf;
4202	struct ice_hw *hw;
4203	int i, err;
4204
4205	if (pdev->is_virtfn) {
4206		dev_err(dev, "can't probe a virtual function\n");
4207		return -EINVAL;
4208	}
4209
4210	/* this driver uses devres, see
4211	 * Documentation/driver-api/driver-model/devres.rst
4212	 */
4213	err = pcim_enable_device(pdev);
4214	if (err)
4215		return err;
4216
4217	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4218	if (err) {
4219		dev_err(dev, "BAR0 I/O map error %d\n", err);
4220		return err;
4221	}
4222
4223	pf = ice_allocate_pf(dev);
4224	if (!pf)
4225		return -ENOMEM;
4226
4227	/* initialize Auxiliary index to invalid value */
4228	pf->aux_idx = -1;
4229
4230	/* set up for high or low DMA */
4231	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4232	if (err)
4233		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4234	if (err) {
4235		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4236		return err;
4237	}
4238
4239	pci_enable_pcie_error_reporting(pdev);
4240	pci_set_master(pdev);
4241
4242	pf->pdev = pdev;
4243	pci_set_drvdata(pdev, pf);
4244	set_bit(ICE_DOWN, pf->state);
4245	/* Disable service task until DOWN bit is cleared */
4246	set_bit(ICE_SERVICE_DIS, pf->state);
4247
4248	hw = &pf->hw;
4249	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4250	pci_save_state(pdev);
4251
4252	hw->back = pf;
4253	hw->vendor_id = pdev->vendor;
4254	hw->device_id = pdev->device;
4255	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4256	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4257	hw->subsystem_device_id = pdev->subsystem_device;
4258	hw->bus.device = PCI_SLOT(pdev->devfn);
4259	hw->bus.func = PCI_FUNC(pdev->devfn);
4260	ice_set_ctrlq_len(hw);
4261
4262	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4263
4264	err = ice_devlink_register(pf);
4265	if (err) {
4266		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4267		goto err_exit_unroll;
4268	}
4269
4270#ifndef CONFIG_DYNAMIC_DEBUG
4271	if (debug < -1)
4272		hw->debug_mask = debug;
4273#endif
4274
4275	err = ice_init_hw(hw);
4276	if (err) {
4277		dev_err(dev, "ice_init_hw failed: %d\n", err);
4278		err = -EIO;
4279		goto err_exit_unroll;
4280	}
4281
4282	ice_request_fw(pf);
4283
4284	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4285	 * set in pf->flags, which will cause ice_is_safe_mode to return
4286	 * true
4287	 */
4288	if (ice_is_safe_mode(pf)) {
4289		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4290		/* we already got function/device capabilities but these don't
4291		 * reflect what the driver needs to do in safe mode. Instead of
4292		 * adding conditional logic everywhere to ignore these
4293		 * device/function capabilities, override them.
4294		 */
4295		ice_set_safe_mode_caps(hw);
4296	}
4297
4298	err = ice_init_pf(pf);
4299	if (err) {
4300		dev_err(dev, "ice_init_pf failed: %d\n", err);
4301		goto err_init_pf_unroll;
4302	}
4303
4304	ice_devlink_init_regions(pf);
4305
4306	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4307	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4308	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4309	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4310	i = 0;
4311	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4312		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4313			pf->hw.tnl.valid_count[TNL_VXLAN];
4314		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4315			UDP_TUNNEL_TYPE_VXLAN;
4316		i++;
4317	}
4318	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4319		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4320			pf->hw.tnl.valid_count[TNL_GENEVE];
4321		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4322			UDP_TUNNEL_TYPE_GENEVE;
4323		i++;
4324	}
4325
4326	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4327	if (!pf->num_alloc_vsi) {
4328		err = -EIO;
4329		goto err_init_pf_unroll;
4330	}
4331	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4332		dev_warn(&pf->pdev->dev,
4333			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4334			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4335		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4336	}
4337
4338	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4339			       GFP_KERNEL);
4340	if (!pf->vsi) {
4341		err = -ENOMEM;
4342		goto err_init_pf_unroll;
4343	}
4344
4345	err = ice_init_interrupt_scheme(pf);
4346	if (err) {
4347		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4348		err = -EIO;
4349		goto err_init_vsi_unroll;
4350	}
4351
4352	/* In case of MSIX we are going to setup the misc vector right here
4353	 * to handle admin queue events etc. In case of legacy and MSI
4354	 * the misc functionality and queue processing is combined in
4355	 * the same vector and that gets setup at open.
4356	 */
4357	err = ice_req_irq_msix_misc(pf);
4358	if (err) {
4359		dev_err(dev, "setup of misc vector failed: %d\n", err);
4360		goto err_init_interrupt_unroll;
4361	}
4362
4363	/* create switch struct for the switch element created by FW on boot */
4364	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4365	if (!pf->first_sw) {
4366		err = -ENOMEM;
4367		goto err_msix_misc_unroll;
4368	}
4369
4370	if (hw->evb_veb)
4371		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4372	else
4373		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4374
4375	pf->first_sw->pf = pf;
4376
4377	/* record the sw_id available for later use */
4378	pf->first_sw->sw_id = hw->port_info->sw_id;
4379
4380	err = ice_setup_pf_sw(pf);
4381	if (err) {
4382		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4383		goto err_alloc_sw_unroll;
4384	}
4385
4386	clear_bit(ICE_SERVICE_DIS, pf->state);
4387
4388	/* tell the firmware we are up */
4389	err = ice_send_version(pf);
4390	if (err) {
4391		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4392			UTS_RELEASE, err);
4393		goto err_send_version_unroll;
4394	}
4395
4396	/* since everything is good, start the service timer */
4397	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4398
4399	err = ice_init_link_events(pf->hw.port_info);
4400	if (err) {
4401		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4402		goto err_send_version_unroll;
4403	}
4404
4405	/* not a fatal error if this fails */
4406	err = ice_init_nvm_phy_type(pf->hw.port_info);
4407	if (err)
4408		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4409
4410	/* not a fatal error if this fails */
4411	err = ice_update_link_info(pf->hw.port_info);
4412	if (err)
4413		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4414
4415	ice_init_link_dflt_override(pf->hw.port_info);
4416
4417	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4418
4419	/* if media available, initialize PHY settings */
4420	if (pf->hw.port_info->phy.link_info.link_info &
4421	    ICE_AQ_MEDIA_AVAILABLE) {
4422		/* not a fatal error if this fails */
4423		err = ice_init_phy_user_cfg(pf->hw.port_info);
4424		if (err)
4425			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4426
4427		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4428			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4429
4430			if (vsi)
4431				ice_configure_phy(vsi);
4432		}
4433	} else {
4434		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4435	}
4436
4437	ice_verify_cacheline_size(pf);
4438
4439	/* Save wakeup reason register for later use */
4440	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4441
4442	/* check for a power management event */
4443	ice_print_wake_reason(pf);
4444
4445	/* clear wake status, all bits */
4446	wr32(hw, PFPM_WUS, U32_MAX);
4447
4448	/* Disable WoL at init, wait for user to enable */
4449	device_set_wakeup_enable(dev, false);
4450
4451	if (ice_is_safe_mode(pf)) {
4452		ice_set_safe_mode_vlan_cfg(pf);
4453		goto probe_done;
4454	}
4455
4456	/* initialize DDP driven features */
4457	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4458		ice_ptp_init(pf);
4459
4460	/* Note: Flow director init failure is non-fatal to load */
4461	if (ice_init_fdir(pf))
4462		dev_err(dev, "could not initialize flow director\n");
4463
4464	/* Note: DCB init failure is non-fatal to load */
4465	if (ice_init_pf_dcb(pf, false)) {
4466		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4467		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4468	} else {
4469		ice_cfg_lldp_mib_change(&pf->hw, true);
4470	}
4471
4472	if (ice_init_lag(pf))
4473		dev_warn(dev, "Failed to init link aggregation support\n");
4474
4475	/* print PCI link speed and width */
4476	pcie_print_link_status(pf->pdev);
4477
4478probe_done:
4479	err = ice_register_netdev(pf);
4480	if (err)
4481		goto err_netdev_reg;
4482
4483	/* ready to go, so clear down state bit */
4484	clear_bit(ICE_DOWN, pf->state);
4485	if (ice_is_aux_ena(pf)) {
4486		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4487		if (pf->aux_idx < 0) {
4488			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4489			err = -ENOMEM;
4490			goto err_netdev_reg;
4491		}
4492
4493		err = ice_init_rdma(pf);
4494		if (err) {
4495			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4496			err = -EIO;
4497			goto err_init_aux_unroll;
4498		}
4499	} else {
4500		dev_warn(dev, "RDMA is not supported on this device\n");
4501	}
4502
4503	return 0;
4504
4505err_init_aux_unroll:
4506	pf->adev = NULL;
4507	ida_free(&ice_aux_ida, pf->aux_idx);
4508err_netdev_reg:
4509err_send_version_unroll:
4510	ice_vsi_release_all(pf);
4511err_alloc_sw_unroll:
4512	set_bit(ICE_SERVICE_DIS, pf->state);
4513	set_bit(ICE_DOWN, pf->state);
4514	devm_kfree(dev, pf->first_sw);
4515err_msix_misc_unroll:
4516	ice_free_irq_msix_misc(pf);
4517err_init_interrupt_unroll:
4518	ice_clear_interrupt_scheme(pf);
4519err_init_vsi_unroll:
4520	devm_kfree(dev, pf->vsi);
4521err_init_pf_unroll:
4522	ice_deinit_pf(pf);
4523	ice_devlink_destroy_regions(pf);
4524	ice_deinit_hw(hw);
4525err_exit_unroll:
4526	ice_devlink_unregister(pf);
4527	pci_disable_pcie_error_reporting(pdev);
4528	pci_disable_device(pdev);
4529	return err;
4530}
4531
4532/**
4533 * ice_set_wake - enable or disable Wake on LAN
4534 * @pf: pointer to the PF struct
4535 *
4536 * Simple helper for WoL control
4537 */
4538static void ice_set_wake(struct ice_pf *pf)
4539{
4540	struct ice_hw *hw = &pf->hw;
4541	bool wol = pf->wol_ena;
4542
4543	/* clear wake state, otherwise new wake events won't fire */
4544	wr32(hw, PFPM_WUS, U32_MAX);
4545
4546	/* enable / disable APM wake up, no RMW needed */
4547	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4548
4549	/* set magic packet filter enabled */
4550	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4551}
4552
4553/**
4554 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4555 * @pf: pointer to the PF struct
4556 *
4557 * Issue firmware command to enable multicast magic wake, making
4558 * sure that any locally administered address (LAA) is used for
4559 * wake, and that PF reset doesn't undo the LAA.
4560 */
4561static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4562{
4563	struct device *dev = ice_pf_to_dev(pf);
4564	struct ice_hw *hw = &pf->hw;
4565	enum ice_status status;
4566	u8 mac_addr[ETH_ALEN];
4567	struct ice_vsi *vsi;
4568	u8 flags;
4569
4570	if (!pf->wol_ena)
4571		return;
4572
4573	vsi = ice_get_main_vsi(pf);
4574	if (!vsi)
4575		return;
4576
4577	/* Get current MAC address in case it's an LAA */
4578	if (vsi->netdev)
4579		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4580	else
4581		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4582
4583	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4584		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4585		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4586
4587	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4588	if (status)
4589		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4590			ice_stat_str(status),
4591			ice_aq_str(hw->adminq.sq_last_status));
4592}
4593
4594/**
4595 * ice_remove - Device removal routine
4596 * @pdev: PCI device information struct
4597 */
4598static void ice_remove(struct pci_dev *pdev)
4599{
4600	struct ice_pf *pf = pci_get_drvdata(pdev);
4601	int i;
4602
4603	if (!pf)
4604		return;
4605
4606	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4607		if (!ice_is_reset_in_progress(pf->state))
4608			break;
4609		msleep(100);
4610	}
4611
4612	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4613		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4614		ice_free_vfs(pf);
4615	}
4616
4617	ice_service_task_stop(pf);
4618
4619	ice_aq_cancel_waiting_tasks(pf);
4620	ice_unplug_aux_dev(pf);
4621	if (pf->aux_idx >= 0)
4622		ida_free(&ice_aux_ida, pf->aux_idx);
4623	set_bit(ICE_DOWN, pf->state);
4624
4625	mutex_destroy(&pf->hw.fdir_fltr_lock);
4626	ice_deinit_lag(pf);
4627	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4628		ice_ptp_release(pf);
4629	if (!ice_is_safe_mode(pf))
4630		ice_remove_arfs(pf);
4631	ice_setup_mc_magic_wake(pf);
4632	ice_vsi_release_all(pf);
4633	ice_set_wake(pf);
4634	ice_free_irq_msix_misc(pf);
4635	ice_for_each_vsi(pf, i) {
4636		if (!pf->vsi[i])
4637			continue;
4638		ice_vsi_free_q_vectors(pf->vsi[i]);
4639	}
4640	ice_deinit_pf(pf);
4641	ice_devlink_destroy_regions(pf);
4642	ice_deinit_hw(&pf->hw);
4643	ice_devlink_unregister(pf);
4644
4645	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4646	 * do it via ice_schedule_reset() since there is no need to rebuild
4647	 * and the service task is already stopped.
4648	 */
4649	ice_reset(&pf->hw, ICE_RESET_PFR);
4650	pci_wait_for_pending_transaction(pdev);
4651	ice_clear_interrupt_scheme(pf);
4652	pci_disable_pcie_error_reporting(pdev);
4653	pci_disable_device(pdev);
4654}
4655
4656/**
4657 * ice_shutdown - PCI callback for shutting down device
4658 * @pdev: PCI device information struct
4659 */
4660static void ice_shutdown(struct pci_dev *pdev)
4661{
4662	struct ice_pf *pf = pci_get_drvdata(pdev);
4663
4664	ice_remove(pdev);
4665
4666	if (system_state == SYSTEM_POWER_OFF) {
4667		pci_wake_from_d3(pdev, pf->wol_ena);
4668		pci_set_power_state(pdev, PCI_D3hot);
4669	}
4670}
4671
4672#ifdef CONFIG_PM
4673/**
4674 * ice_prepare_for_shutdown - prep for PCI shutdown
4675 * @pf: board private structure
4676 *
4677 * Inform or close all dependent features in prep for PCI device shutdown
4678 */
4679static void ice_prepare_for_shutdown(struct ice_pf *pf)
4680{
4681	struct ice_hw *hw = &pf->hw;
4682	u32 v;
4683
4684	/* Notify VFs of impending reset */
4685	if (ice_check_sq_alive(hw, &hw->mailboxq))
4686		ice_vc_notify_reset(pf);
4687
4688	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4689
4690	/* disable the VSIs and their queues that are not already DOWN */
4691	ice_pf_dis_all_vsi(pf, false);
4692
4693	ice_for_each_vsi(pf, v)
4694		if (pf->vsi[v])
4695			pf->vsi[v]->vsi_num = 0;
4696
4697	ice_shutdown_all_ctrlq(hw);
4698}
4699
4700/**
4701 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4702 * @pf: board private structure to reinitialize
4703 *
4704 * This routine reinitializes the interrupt scheme that was cleared during
4705 * the power management suspend callback.
4706 *
4707 * This should be called during resume routine to re-allocate the q_vectors
4708 * and reacquire interrupts.
4709 */
4710static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4711{
4712	struct device *dev = ice_pf_to_dev(pf);
4713	int ret, v;
4714
4715	/* Since we clear MSIX flag during suspend, we need to
4716	 * set it back during resume...
4717	 */
4718
4719	ret = ice_init_interrupt_scheme(pf);
4720	if (ret) {
4721		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4722		return ret;
4723	}
4724
4725	/* Remap vectors and rings, after successful re-init interrupts */
4726	ice_for_each_vsi(pf, v) {
4727		if (!pf->vsi[v])
4728			continue;
4729
4730		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4731		if (ret)
4732			goto err_reinit;
4733		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4734	}
4735
4736	ret = ice_req_irq_msix_misc(pf);
4737	if (ret) {
4738		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4739			ret);
4740		goto err_reinit;
4741	}
4742
4743	return 0;
4744
4745err_reinit:
4746	while (v--)
4747		if (pf->vsi[v])
4748			ice_vsi_free_q_vectors(pf->vsi[v]);
4749
4750	return ret;
4751}
4752
4753/**
4754 * ice_suspend
4755 * @dev: generic device information structure
4756 *
4757 * Power Management callback to quiesce the device and prepare
4758 * for D3 transition.
4759 */
4760static int __maybe_unused ice_suspend(struct device *dev)
4761{
4762	struct pci_dev *pdev = to_pci_dev(dev);
4763	struct ice_pf *pf;
4764	int disabled, v;
4765
4766	pf = pci_get_drvdata(pdev);
4767
4768	if (!ice_pf_state_is_nominal(pf)) {
4769		dev_err(dev, "Device is not ready, no need to suspend it\n");
4770		return -EBUSY;
4771	}
4772
4773	/* Stop watchdog tasks until resume completion.
4774	 * Even though it is most likely that the service task is
4775	 * disabled if the device is suspended or down, the service task's
4776	 * state is controlled by a different state bit, and we should
4777	 * store and honor whatever state that bit is in at this point.
4778	 */
4779	disabled = ice_service_task_stop(pf);
4780
4781	ice_unplug_aux_dev(pf);
4782
4783	/* If already suspended, there is nothing to do */
4784	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4785		if (!disabled)
4786			ice_service_task_restart(pf);
4787		return 0;
4788	}
4789
4790	if (test_bit(ICE_DOWN, pf->state) ||
4791	    ice_is_reset_in_progress(pf->state)) {
4792		dev_err(dev, "can't suspend device in reset or already down\n");
4793		if (!disabled)
4794			ice_service_task_restart(pf);
4795		return 0;
4796	}
4797
4798	ice_setup_mc_magic_wake(pf);
4799
4800	ice_prepare_for_shutdown(pf);
4801
4802	ice_set_wake(pf);
4803
4804	/* Free vectors, clear the interrupt scheme and release IRQs
4805	 * for proper hibernation, especially with large number of CPUs.
4806	 * Otherwise hibernation might fail when mapping all the vectors back
4807	 * to CPU0.
4808	 */
4809	ice_free_irq_msix_misc(pf);
4810	ice_for_each_vsi(pf, v) {
4811		if (!pf->vsi[v])
4812			continue;
4813		ice_vsi_free_q_vectors(pf->vsi[v]);
4814	}
4815	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4816	ice_clear_interrupt_scheme(pf);
4817
4818	pci_save_state(pdev);
4819	pci_wake_from_d3(pdev, pf->wol_ena);
4820	pci_set_power_state(pdev, PCI_D3hot);
4821	return 0;
4822}
4823
4824/**
4825 * ice_resume - PM callback for waking up from D3
4826 * @dev: generic device information structure
4827 */
4828static int __maybe_unused ice_resume(struct device *dev)
4829{
4830	struct pci_dev *pdev = to_pci_dev(dev);
4831	enum ice_reset_req reset_type;
4832	struct ice_pf *pf;
4833	struct ice_hw *hw;
4834	int ret;
4835
4836	pci_set_power_state(pdev, PCI_D0);
4837	pci_restore_state(pdev);
4838	pci_save_state(pdev);
4839
4840	if (!pci_device_is_present(pdev))
4841		return -ENODEV;
4842
4843	ret = pci_enable_device_mem(pdev);
4844	if (ret) {
4845		dev_err(dev, "Cannot enable device after suspend\n");
4846		return ret;
4847	}
4848
4849	pf = pci_get_drvdata(pdev);
4850	hw = &pf->hw;
4851
4852	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4853	ice_print_wake_reason(pf);
4854
4855	/* We cleared the interrupt scheme when we suspended, so we need to
4856	 * restore it now to resume device functionality.
4857	 */
4858	ret = ice_reinit_interrupt_scheme(pf);
4859	if (ret)
4860		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4861
4862	clear_bit(ICE_DOWN, pf->state);
4863	/* Now perform PF reset and rebuild */
4864	reset_type = ICE_RESET_PFR;
4865	/* re-enable service task for reset, but allow reset to schedule it */
4866	clear_bit(ICE_SERVICE_DIS, pf->state);
4867
4868	if (ice_schedule_reset(pf, reset_type))
4869		dev_err(dev, "Reset during resume failed.\n");
4870
4871	clear_bit(ICE_SUSPENDED, pf->state);
4872	ice_service_task_restart(pf);
4873
4874	/* Restart the service task */
4875	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4876
4877	return 0;
4878}
4879#endif /* CONFIG_PM */
4880
4881/**
4882 * ice_pci_err_detected - warning that PCI error has been detected
4883 * @pdev: PCI device information struct
4884 * @err: the type of PCI error
4885 *
4886 * Called to warn that something happened on the PCI bus and the error handling
4887 * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4888 */
4889static pci_ers_result_t
4890ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4891{
4892	struct ice_pf *pf = pci_get_drvdata(pdev);
4893
4894	if (!pf) {
4895		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4896			__func__, err);
4897		return PCI_ERS_RESULT_DISCONNECT;
4898	}
4899
4900	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4901		ice_service_task_stop(pf);
4902
4903		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4904			set_bit(ICE_PFR_REQ, pf->state);
4905			ice_prepare_for_reset(pf);
4906		}
4907	}
4908
4909	return PCI_ERS_RESULT_NEED_RESET;
4910}
4911
4912/**
4913 * ice_pci_err_slot_reset - a PCI slot reset has just happened
4914 * @pdev: PCI device information struct
4915 *
4916 * Called to determine if the driver can recover from the PCI slot reset by
4917 * using a register read to determine if the device is recoverable.
4918 */
4919static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4920{
4921	struct ice_pf *pf = pci_get_drvdata(pdev);
4922	pci_ers_result_t result;
4923	int err;
4924	u32 reg;
4925
4926	err = pci_enable_device_mem(pdev);
4927	if (err) {
4928		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4929			err);
4930		result = PCI_ERS_RESULT_DISCONNECT;
4931	} else {
4932		pci_set_master(pdev);
4933		pci_restore_state(pdev);
4934		pci_save_state(pdev);
4935		pci_wake_from_d3(pdev, false);
4936
4937		/* Check for life */
4938		reg = rd32(&pf->hw, GLGEN_RTRIG);
4939		if (!reg)
4940			result = PCI_ERS_RESULT_RECOVERED;
4941		else
4942			result = PCI_ERS_RESULT_DISCONNECT;
4943	}
4944
4945	err = pci_aer_clear_nonfatal_status(pdev);
4946	if (err)
4947		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4948			err);
4949	/* non-fatal, continue */
4950
4951	return result;
4952}
4953
4954/**
4955 * ice_pci_err_resume - restart operations after PCI error recovery
4956 * @pdev: PCI device information struct
4957 *
4958 * Called to allow the driver to bring things back up after PCI error and/or
4959 * reset recovery have finished
4960 */
4961static void ice_pci_err_resume(struct pci_dev *pdev)
4962{
4963	struct ice_pf *pf = pci_get_drvdata(pdev);
4964
4965	if (!pf) {
4966		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4967			__func__);
4968		return;
4969	}
4970
4971	if (test_bit(ICE_SUSPENDED, pf->state)) {
4972		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4973			__func__);
4974		return;
4975	}
4976
4977	ice_restore_all_vfs_msi_state(pdev);
4978
4979	ice_do_reset(pf, ICE_RESET_PFR);
4980	ice_service_task_restart(pf);
4981	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4982}
4983
4984/**
4985 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4986 * @pdev: PCI device information struct
4987 */
4988static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4989{
4990	struct ice_pf *pf = pci_get_drvdata(pdev);
4991
4992	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4993		ice_service_task_stop(pf);
4994
4995		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4996			set_bit(ICE_PFR_REQ, pf->state);
4997			ice_prepare_for_reset(pf);
4998		}
4999	}
5000}
5001
5002/**
5003 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5004 * @pdev: PCI device information struct
5005 */
5006static void ice_pci_err_reset_done(struct pci_dev *pdev)
5007{
5008	ice_pci_err_resume(pdev);
5009}
5010
5011/* ice_pci_tbl - PCI Device ID Table
5012 *
5013 * Wildcard entries (PCI_ANY_ID) should come last
5014 * Last entry must be all 0s
5015 *
5016 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5017 *   Class, Class Mask, private data (not used) }
5018 */
5019static const struct pci_device_id ice_pci_tbl[] = {
5020	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5021	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5022	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5023	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5024	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5025	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5026	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5027	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5028	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5029	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5030	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5031	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5032	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5033	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5034	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5035	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5036	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5037	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5038	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5039	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5040	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5041	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5042	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5043	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5044	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5045	/* required last entry */
5046	{ 0, }
5047};
5048MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5049
5050static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
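/* For reference, SIMPLE_DEV_PM_OPS() above expands (roughly; see
 * include/linux/pm.h for the exact macro) to a dev_pm_ops that wires the
 * same pair of callbacks into every system-sleep transition:
 *
 *	static const struct dev_pm_ops ice_pm_ops = {
 *		.suspend  = ice_suspend,  .resume  = ice_resume,
 *		.freeze   = ice_suspend,  .thaw    = ice_resume,
 *		.poweroff = ice_suspend,  .restore = ice_resume,
 *	};
 */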
5051
5052static const struct pci_error_handlers ice_pci_err_handler = {
5053	.error_detected = ice_pci_err_detected,
5054	.slot_reset = ice_pci_err_slot_reset,
5055	.reset_prepare = ice_pci_err_reset_prepare,
5056	.reset_done = ice_pci_err_reset_done,
5057	.resume = ice_pci_err_resume
5058};
5059
5060static struct pci_driver ice_driver = {
5061	.name = KBUILD_MODNAME,
5062	.id_table = ice_pci_tbl,
5063	.probe = ice_probe,
5064	.remove = ice_remove,
5065#ifdef CONFIG_PM
5066	.driver.pm = &ice_pm_ops,
5067#endif /* CONFIG_PM */
5068	.shutdown = ice_shutdown,
5069	.sriov_configure = ice_sriov_configure,
5070	.err_handler = &ice_pci_err_handler
5071};
5072
5073/**
5074 * ice_module_init - Driver registration routine
5075 *
5076 * ice_module_init is the first routine called when the driver is
5077 * loaded. All it does is register with the PCI subsystem.
5078 */
5079static int __init ice_module_init(void)
5080{
5081	int status;
5082
5083	pr_info("%s\n", ice_driver_string);
5084	pr_info("%s\n", ice_copyright);
5085
5086	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5087	if (!ice_wq) {
5088		pr_err("Failed to create workqueue\n");
5089		return -ENOMEM;
5090	}
5091
5092	status = pci_register_driver(&ice_driver);
5093	if (status) {
5094		pr_err("failed to register PCI driver, err %d\n", status);
5095		destroy_workqueue(ice_wq);
5096	}
5097
5098	return status;
5099}
5100module_init(ice_module_init);
5101
5102/**
5103 * ice_module_exit - Driver exit cleanup routine
5104 *
5105 * ice_module_exit is called just before the driver is removed
5106 * from memory.
5107 */
5108static void __exit ice_module_exit(void)
5109{
5110	pci_unregister_driver(&ice_driver);
5111	destroy_workqueue(ice_wq);
5112	pr_info("module unloaded\n");
5113}
5114module_exit(ice_module_exit);
5115
5116/**
5117 * ice_set_mac_address - NDO callback to set MAC address
5118 * @netdev: network interface device structure
5119 * @pi: pointer to an address structure
5120 *
5121 * Returns 0 on success, negative on failure
5122 */
5123static int ice_set_mac_address(struct net_device *netdev, void *pi)
5124{
5125	struct ice_netdev_priv *np = netdev_priv(netdev);
5126	struct ice_vsi *vsi = np->vsi;
5127	struct ice_pf *pf = vsi->back;
5128	struct ice_hw *hw = &pf->hw;
5129	struct sockaddr *addr = pi;
5130	enum ice_status status;
5131	u8 old_mac[ETH_ALEN];
5132	u8 flags = 0;
5133	int err = 0;
5134	u8 *mac;
5135
5136	mac = (u8 *)addr->sa_data;
5137
5138	if (!is_valid_ether_addr(mac))
5139		return -EADDRNOTAVAIL;
5140
5141	if (ether_addr_equal(netdev->dev_addr, mac)) {
5142		netdev_dbg(netdev, "already using mac %pM\n", mac);
5143		return 0;
5144	}
5145
5146	if (test_bit(ICE_DOWN, pf->state) ||
5147	    ice_is_reset_in_progress(pf->state)) {
5148		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5149			   mac);
5150		return -EBUSY;
5151	}
5152
5153	netif_addr_lock_bh(netdev);
5154	ether_addr_copy(old_mac, netdev->dev_addr);
5155	/* change the netdev's MAC address */
5156	memcpy(netdev->dev_addr, mac, netdev->addr_len);
5157	netif_addr_unlock_bh(netdev);
5158
5159	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5160	status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5161	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5162		err = -EADDRNOTAVAIL;
5163		goto err_update_filters;
5164	}
5165
5166	/* Add filter for new MAC. If filter exists, return success */
5167	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5168	if (status == ICE_ERR_ALREADY_EXISTS)
5169		/* Although this MAC filter is already present in hardware it's
5170		 * possible in some cases (e.g. bonding) that dev_addr was
5171		 * modified outside of the driver and needs to be restored back
5172		 * to this value.
5173		 */
5174		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5175	else if (status)
5176		/* error if the new filter addition failed */
5177		err = -EADDRNOTAVAIL;
5178
5179err_update_filters:
5180	if (err) {
5181		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5182			   mac);
5183		netif_addr_lock_bh(netdev);
5184		ether_addr_copy(netdev->dev_addr, old_mac);
5185		netif_addr_unlock_bh(netdev);
5186		return err;
5187	}
5188
5189	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5190		   netdev->dev_addr);
5191
5192	/* write new MAC address to the firmware */
5193	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5194	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5195	if (status) {
5196		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5197			   mac, ice_stat_str(status));
5198	}
5199	return 0;
5200}
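/* Usage sketch (editorial, not driver code): this NDO is reached through the
 * kernel's dev_set_mac_address() path, for example from iproute2; the
 * interface name and address below are placeholders:
 *
 *	ip link set dev eth0 address 02:11:22:33:44:55
 */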
5201
5202/**
5203 * ice_set_rx_mode - NDO callback to set the netdev filters
5204 * @netdev: network interface device structure
5205 */
5206static void ice_set_rx_mode(struct net_device *netdev)
5207{
5208	struct ice_netdev_priv *np = netdev_priv(netdev);
5209	struct ice_vsi *vsi = np->vsi;
5210
5211	if (!vsi)
5212		return;
5213
5214	/* Set the flags to synchronize filters;
5215	 * ndo_set_rx_mode may be triggered even without a change in netdev
5216	 * flags.
5217	 */
5218	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5219	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5220	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5221
5222	/* schedule our worker thread which will take care of
5223	 * applying the new filter changes
5224	 */
5225	ice_service_task_schedule(vsi->back);
5226}
5227
5228/**
5229 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5230 * @netdev: network interface device structure
5231 * @queue_index: Queue ID
5232 * @maxrate: maximum bandwidth in Mbps
5233 */
5234static int
5235ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5236{
5237	struct ice_netdev_priv *np = netdev_priv(netdev);
5238	struct ice_vsi *vsi = np->vsi;
5239	enum ice_status status;
5240	u16 q_handle;
5241	u8 tc;
5242
5243	/* Validate maxrate requested is within permitted range */
5244	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5245		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5246			   maxrate, queue_index);
5247		return -EINVAL;
5248	}
5249
5250	q_handle = vsi->tx_rings[queue_index]->q_handle;
5251	tc = ice_dcb_get_tc(vsi, queue_index);
5252
5253	/* Set BW back to default when the user sets maxrate to 0 */
5254	if (!maxrate)
5255		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5256					       q_handle, ICE_MAX_BW);
5257	else
5258		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5259					  q_handle, ICE_MAX_BW, maxrate * 1000);
5260	if (status) {
5261		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5262			   ice_stat_str(status));
5263		return -EIO;
5264	}
5265
5266	return 0;
5267}
5268
5269/**
5270 * ice_fdb_add - add an entry to the hardware database
5271 * @ndm: the input from the stack
5272 * @tb: pointer to array of nladdr (unused)
5273 * @dev: the net device pointer
5274 * @addr: the MAC address entry being added
5275 * @vid: VLAN ID
5276 * @flags: instructions from stack about fdb operation
5277 * @extack: netlink extended ack
5278 */
5279static int
5280ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5281	    struct net_device *dev, const unsigned char *addr, u16 vid,
5282	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5283{
5284	int err;
5285
5286	if (vid) {
5287		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5288		return -EINVAL;
5289	}
5290	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5291		netdev_err(dev, "FDB only supports static addresses\n");
5292		return -EINVAL;
5293	}
5294
5295	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5296		err = dev_uc_add_excl(dev, addr);
5297	else if (is_multicast_ether_addr(addr))
5298		err = dev_mc_add_excl(dev, addr);
5299	else
5300		err = -EINVAL;
5301
5302	/* Only return duplicate errors if NLM_F_EXCL is set */
5303	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5304		err = 0;
5305
5306	return err;
5307}
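/* Usage sketch (editorial, not driver code): the FDB add/del handlers are
 * driven by RTM_NEWNEIGH/RTM_DELNEIGH netlink requests, e.g. via iproute2;
 * the device name and address are placeholders. "permanent" maps to the
 * NUD_PERMANENT state the add handler above requires:
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev eth0 permanent
 *	bridge fdb del 01:00:5e:00:00:42 dev eth0
 */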
5308
5309/**
5310 * ice_fdb_del - delete an entry from the hardware database
5311 * @ndm: the input from the stack
5312 * @tb: pointer to array of nladdr (unused)
5313 * @dev: the net device pointer
5314 * @addr: the MAC address entry being removed
5315 * @vid: VLAN ID
5316 */
5317static int
5318ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5319	    struct net_device *dev, const unsigned char *addr,
5320	    __always_unused u16 vid)
5321{
5322	int err;
5323
5324	if (ndm->ndm_state & NUD_PERMANENT) {
5325		netdev_err(dev, "FDB only supports static addresses\n");
5326		return -EINVAL;
5327	}
5328
5329	if (is_unicast_ether_addr(addr))
5330		err = dev_uc_del(dev, addr);
5331	else if (is_multicast_ether_addr(addr))
5332		err = dev_mc_del(dev, addr);
5333	else
5334		err = -EINVAL;
5335
5336	return err;
5337}
5338
5339/**
5340 * ice_set_features - set the netdev feature flags
5341 * @netdev: ptr to the netdev being adjusted
5342 * @features: the feature set that the stack is suggesting
5343 */
5344static int
5345ice_set_features(struct net_device *netdev, netdev_features_t features)
5346{
5347	struct ice_netdev_priv *np = netdev_priv(netdev);
5348	struct ice_vsi *vsi = np->vsi;
5349	struct ice_pf *pf = vsi->back;
5350	int ret = 0;
5351
5352	/* Don't set any netdev advanced features with device in Safe Mode */
5353	if (ice_is_safe_mode(vsi->back)) {
5354		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5355		return ret;
5356	}
5357
5358	/* Do not change setting during reset */
5359	if (ice_is_reset_in_progress(pf->state)) {
5360		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting; changing advanced netdev features is temporarily unavailable.\n");
5361		return -EBUSY;
5362	}
5363
5364	/* Multiple features can be changed in one call so keep features in
5365	 * separate if/else statements to guarantee each feature is checked
5366	 */
5367	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5368		ice_vsi_manage_rss_lut(vsi, true);
5369	else if (!(features & NETIF_F_RXHASH) &&
5370		 netdev->features & NETIF_F_RXHASH)
5371		ice_vsi_manage_rss_lut(vsi, false);
5372
5373	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5374	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5375		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5376	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5377		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5378		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5379
5380	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5381	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5382		ret = ice_vsi_manage_vlan_insertion(vsi);
5383	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5384		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5385		ret = ice_vsi_manage_vlan_insertion(vsi);
5386
5387	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5388	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5389		ret = ice_cfg_vlan_pruning(vsi, true, false);
5390	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5391		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5392		ret = ice_cfg_vlan_pruning(vsi, false, false);
5393
5394	if ((features & NETIF_F_NTUPLE) &&
5395	    !(netdev->features & NETIF_F_NTUPLE)) {
5396		ice_vsi_manage_fdir(vsi, true);
5397		ice_init_arfs(vsi);
5398	} else if (!(features & NETIF_F_NTUPLE) &&
5399		 (netdev->features & NETIF_F_NTUPLE)) {
5400		ice_vsi_manage_fdir(vsi, false);
5401		ice_clear_arfs(vsi);
5402	}
5403
5404	return ret;
5405}
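/* Usage sketch (editorial, not driver code): these feature bits are normally
 * toggled with ethtool, which reaches this function via ndo_set_features;
 * the interface name is a placeholder:
 *
 *	ethtool -K eth0 rxhash off	# NETIF_F_RXHASH
 *	ethtool -K eth0 rxvlan on	# NETIF_F_HW_VLAN_CTAG_RX
 *	ethtool -K eth0 ntuple on	# NETIF_F_NTUPLE (Flow Director/aRFS)
 */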
5406
5407/**
5408 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5409 * @vsi: VSI to setup VLAN properties for
5410 */
5411static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5412{
5413	int ret = 0;
5414
5415	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5416		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5417	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5418		ret = ice_vsi_manage_vlan_insertion(vsi);
5419
5420	return ret;
5421}
5422
5423/**
5424 * ice_vsi_cfg - Setup the VSI
5425 * @vsi: the VSI being configured
5426 *
5427 * Return 0 on success and negative value on error
5428 */
5429int ice_vsi_cfg(struct ice_vsi *vsi)
5430{
5431	int err;
5432
5433	if (vsi->netdev) {
5434		ice_set_rx_mode(vsi->netdev);
5435
5436		err = ice_vsi_vlan_setup(vsi);
5437
5438		if (err)
5439			return err;
5440	}
5441	ice_vsi_cfg_dcb_rings(vsi);
5442
5443	err = ice_vsi_cfg_lan_txqs(vsi);
5444	if (!err && ice_is_xdp_ena_vsi(vsi))
5445		err = ice_vsi_cfg_xdp_txqs(vsi);
5446	if (!err)
5447		err = ice_vsi_cfg_rxqs(vsi);
5448
5449	return err;
5450}
5451
5452/* THEORY OF MODERATION:
5453 * The below code creates custom DIM profiles for use by this driver, because
5454 * the ice driver hardware works differently than the hardware that DIMLIB was
5455 * originally made for. ice hardware doesn't have packet count limits that
5456 * can trigger an interrupt, but it *does* have interrupt rate limit support,
5457 * and this code adds that capability to be used by the driver when it's using
5458 * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
5459 * for how to "respond" to traffic and interrupts, so this driver uses a
5460 * slightly different set of moderation parameters to get best performance.
5461 */
5462struct ice_dim {
5463	/* the throttle rate for interrupts, basically the worst-case delay before
5464	 * an initial interrupt fires; the value is stored in microseconds.
5465	 */
5466	u16 itr;
5467	/* the rate limit for interrupts, which can cap a delay from a small
5468	 * ITR at a certain number of interrupts per second, e.g. a 2us ITR
5469	 * could yield as much as 500,000 interrupts per second, but with a
5470	 * 10us rate limit, it limits to 100,000 interrupts per second. Value
5471	 * is stored in microseconds.
5472	 */
5473	u16 intrl;
5474};
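/* Worked example of the itr/intrl interplay described above: itr = 2us alone
 * would allow up to 1s / 2us = 500,000 interrupts per second under constant
 * traffic. Adding intrl = 10us enforces at least 10us between interrupts,
 * capping the rate at 1s / 10us = 100,000 per second, while sparse traffic
 * still sees the low 2us latency.
 */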
5475
5476/* Make a different profile for Rx that doesn't allow quite so aggressive
5477 * moderation at the high end (it maxes out at 128us, or about 8k interrupts a
5478 * second). The INTRL/rate parameters here are only useful to cap small ITR
5479 * values, which is why for larger ITRs - like 128, which can only generate
5480 * 8k interrupts per second - there is no point in rate limiting, and the
5481 * values are set to zero. The rate limit values do affect latency, and so
5482 * must be reasonably small so as not to impact latency-sensitive tests.
5483 */
5484static const struct ice_dim rx_profile[] = {
5485	{2, 10},
5486	{8, 16},
5487	{32, 0},
5488	{96, 0},
5489	{128, 0}
5490};
5491
5492/* The transmit profile, which has the same sorts of values
5493 * as the previous struct
5494 */
5495static const struct ice_dim tx_profile[] = {
5496	{2, 10},
5497	{8, 16},
5498	{64, 0},
5499	{128, 0},
5500	{256, 0}
5501};
5502
5503static void ice_tx_dim_work(struct work_struct *work)
5504{
5505	struct ice_ring_container *rc;
5506	struct ice_q_vector *q_vector;
5507	struct dim *dim;
5508	u16 itr, intrl;
5509
5510	dim = container_of(work, struct dim, work);
5511	rc = container_of(dim, struct ice_ring_container, dim);
5512	q_vector = container_of(rc, struct ice_q_vector, tx);
5513
5514	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5515		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5516
5517	/* look up the values in our local table */
5518	itr = tx_profile[dim->profile_ix].itr;
5519	intrl = tx_profile[dim->profile_ix].intrl;
5520
5521	ice_trace(tx_dim_work, q_vector, dim);
5522	ice_write_itr(rc, itr);
5523	ice_write_intrl(q_vector, intrl);
5524
5525	dim->state = DIM_START_MEASURE;
5526}
5527
5528static void ice_rx_dim_work(struct work_struct *work)
5529{
5530	struct ice_ring_container *rc;
5531	struct ice_q_vector *q_vector;
5532	struct dim *dim;
5533	u16 itr, intrl;
5534
5535	dim = container_of(work, struct dim, work);
5536	rc = container_of(dim, struct ice_ring_container, dim);
5537	q_vector = container_of(rc, struct ice_q_vector, rx);
5538
5539	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5540		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5541
5542	/* look up the values in our local table */
5543	itr = rx_profile[dim->profile_ix].itr;
5544	intrl = rx_profile[dim->profile_ix].intrl;
5545
5546	ice_trace(rx_dim_work, q_vector, dim);
5547	ice_write_itr(rc, itr);
5548	ice_write_intrl(q_vector, intrl);
5549
5550	dim->state = DIM_START_MEASURE;
5551}
5552
5553/**
5554 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5555 * @vsi: the VSI being configured
5556 */
5557static void ice_napi_enable_all(struct ice_vsi *vsi)
5558{
5559	int q_idx;
5560
5561	if (!vsi->netdev)
5562		return;
5563
5564	ice_for_each_q_vector(vsi, q_idx) {
5565		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5566
5567		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5568		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5569
5570		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5571		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5572
5573		if (q_vector->rx.ring || q_vector->tx.ring)
5574			napi_enable(&q_vector->napi);
5575	}
5576}
5577
5578/**
5579 * ice_up_complete - Finish the last steps of bringing up a connection
5580 * @vsi: The VSI being configured
5581 *
5582 * Return 0 on success and negative value on error
5583 */
5584static int ice_up_complete(struct ice_vsi *vsi)
5585{
5586	struct ice_pf *pf = vsi->back;
5587	int err;
5588
5589	ice_vsi_cfg_msix(vsi);
5590
5591	/* Enable only Rx rings; Tx rings were enabled by the FW when the
5592	 * Tx queue group list was configured and the context bits were
5593	 * programmed using ice_vsi_cfg_txqs
5594	 */
5595	err = ice_vsi_start_all_rx_rings(vsi);
5596	if (err)
5597		return err;
5598
5599	clear_bit(ICE_VSI_DOWN, vsi->state);
5600	ice_napi_enable_all(vsi);
5601	ice_vsi_ena_irq(vsi);
5602
5603	if (vsi->port_info &&
5604	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5605	    vsi->netdev) {
5606		ice_print_link_msg(vsi, true);
5607		netif_tx_start_all_queues(vsi->netdev);
5608		netif_carrier_on(vsi->netdev);
5609	}
5610
5611	ice_service_task_schedule(pf);
5612
5613	return 0;
5614}
5615
5616/**
5617 * ice_up - Bring the connection back up after being down
5618 * @vsi: VSI being configured
5619 */
5620int ice_up(struct ice_vsi *vsi)
5621{
5622	int err;
5623
5624	err = ice_vsi_cfg(vsi);
5625	if (!err)
5626		err = ice_up_complete(vsi);
5627
5628	return err;
5629}
5630
5631/**
5632 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5633 * @ring: Tx or Rx ring to read stats from
5634 * @pkts: packets stats counter
5635 * @bytes: bytes stats counter
5636 *
5637 * This function fetches stats from the ring considering the atomic operations
5638 * that need to be performed to read u64 values on a 32-bit machine.
5639 */
5640static void
5641ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5642{
5643	unsigned int start;
5644	*pkts = 0;
5645	*bytes = 0;
5646
5647	if (!ring)
5648		return;
5649	do {
5650		start = u64_stats_fetch_begin_irq(&ring->syncp);
5651		*pkts = ring->stats.pkts;
5652		*bytes = ring->stats.bytes;
5653	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5654}
5655
5656/**
5657 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5658 * @vsi: the VSI to be updated
5659 * @rings: rings to work on
5660 * @count: number of rings
5661 */
5662static void
5663ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5664			     u16 count)
5665{
5666	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5667	u16 i;
5668
5669	for (i = 0; i < count; i++) {
5670		struct ice_ring *ring;
5671		u64 pkts, bytes;
5672
5673		ring = READ_ONCE(rings[i]);
5674		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5675		vsi_stats->tx_packets += pkts;
5676		vsi_stats->tx_bytes += bytes;
5677		vsi->tx_restart += ring->tx_stats.restart_q;
5678		vsi->tx_busy += ring->tx_stats.tx_busy;
5679		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5680	}
5681}
5682
5683/**
5684 * ice_update_vsi_ring_stats - Update VSI stats counters
5685 * @vsi: the VSI to be updated
5686 */
5687static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5688{
5689	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5690	u64 pkts, bytes;
5691	int i;
5692
5693	/* reset netdev stats */
5694	vsi_stats->tx_packets = 0;
5695	vsi_stats->tx_bytes = 0;
5696	vsi_stats->rx_packets = 0;
5697	vsi_stats->rx_bytes = 0;
5698
5699	/* reset non-netdev (extended) stats */
5700	vsi->tx_restart = 0;
5701	vsi->tx_busy = 0;
5702	vsi->tx_linearize = 0;
5703	vsi->rx_buf_failed = 0;
5704	vsi->rx_page_failed = 0;
5705
5706	rcu_read_lock();
5707
5708	/* update Tx rings counters */
5709	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5710
5711	/* update Rx rings counters */
5712	ice_for_each_rxq(vsi, i) {
5713		struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5714
5715		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5716		vsi_stats->rx_packets += pkts;
5717		vsi_stats->rx_bytes += bytes;
5718		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5719		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5720	}
5721
5722	/* update XDP Tx rings counters */
5723	if (ice_is_xdp_ena_vsi(vsi))
5724		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5725					     vsi->num_xdp_txq);
5726
5727	rcu_read_unlock();
5728}
5729
5730/**
5731 * ice_update_vsi_stats - Update VSI stats counters
5732 * @vsi: the VSI to be updated
5733 */
5734void ice_update_vsi_stats(struct ice_vsi *vsi)
5735{
5736	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5737	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5738	struct ice_pf *pf = vsi->back;
5739
5740	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5741	    test_bit(ICE_CFG_BUSY, pf->state))
5742		return;
5743
5744	/* get stats as recorded by Tx/Rx rings */
5745	ice_update_vsi_ring_stats(vsi);
5746
5747	/* get VSI stats as recorded by the hardware */
5748	ice_update_eth_stats(vsi);
5749
5750	cur_ns->tx_errors = cur_es->tx_errors;
5751	cur_ns->rx_dropped = cur_es->rx_discards;
5752	cur_ns->tx_dropped = cur_es->tx_discards;
5753	cur_ns->multicast = cur_es->rx_multicast;
5754
5755	/* update some more netdev stats if this is main VSI */
5756	if (vsi->type == ICE_VSI_PF) {
5757		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5758		cur_ns->rx_errors = pf->stats.crc_errors +
5759				    pf->stats.illegal_bytes +
5760				    pf->stats.rx_len_errors +
5761				    pf->stats.rx_undersize +
5762				    pf->hw_csum_rx_error +
5763				    pf->stats.rx_jabber +
5764				    pf->stats.rx_fragments +
5765				    pf->stats.rx_oversize;
5766		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5767		/* record drops from the port level */
5768		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5769	}
5770}
5771
5772/**
5773 * ice_update_pf_stats - Update PF port stats counters
5774 * @pf: PF whose stats need to be updated
5775 */
5776void ice_update_pf_stats(struct ice_pf *pf)
5777{
5778	struct ice_hw_port_stats *prev_ps, *cur_ps;
5779	struct ice_hw *hw = &pf->hw;
5780	u16 fd_ctr_base;
5781	u8 port;
5782
5783	port = hw->port_info->lport;
5784	prev_ps = &pf->stats_prev;
5785	cur_ps = &pf->stats;
5786
5787	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5788			  &prev_ps->eth.rx_bytes,
5789			  &cur_ps->eth.rx_bytes);
5790
5791	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5792			  &prev_ps->eth.rx_unicast,
5793			  &cur_ps->eth.rx_unicast);
5794
5795	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5796			  &prev_ps->eth.rx_multicast,
5797			  &cur_ps->eth.rx_multicast);
5798
5799	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5800			  &prev_ps->eth.rx_broadcast,
5801			  &cur_ps->eth.rx_broadcast);
5802
5803	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5804			  &prev_ps->eth.rx_discards,
5805			  &cur_ps->eth.rx_discards);
5806
5807	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5808			  &prev_ps->eth.tx_bytes,
5809			  &cur_ps->eth.tx_bytes);
5810
5811	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5812			  &prev_ps->eth.tx_unicast,
5813			  &cur_ps->eth.tx_unicast);
5814
5815	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5816			  &prev_ps->eth.tx_multicast,
5817			  &cur_ps->eth.tx_multicast);
5818
5819	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5820			  &prev_ps->eth.tx_broadcast,
5821			  &cur_ps->eth.tx_broadcast);
5822
5823	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5824			  &prev_ps->tx_dropped_link_down,
5825			  &cur_ps->tx_dropped_link_down);
5826
5827	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5828			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5829
5830	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5831			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5832
5833	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5834			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5835
5836	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5837			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5838
5839	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5840			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5841
5842	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5843			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5844
5845	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5846			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5847
5848	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5849			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5850
5851	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5852			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5853
5854	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5855			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5856
5857	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5858			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5859
5860	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5861			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5862
5863	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5864			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5865
5866	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5867			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5868
5869	fd_ctr_base = hw->fd_ctr_base;
5870
5871	ice_stat_update40(hw,
5872			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5873			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5874			  &cur_ps->fd_sb_match);
5875	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5876			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5877
5878	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5879			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5880
5881	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5882			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5883
5884	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5885			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5886
5887	ice_update_dcb_stats(pf);
5888
5889	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5890			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5891
5892	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5893			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5894
5895	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5896			  &prev_ps->mac_local_faults,
5897			  &cur_ps->mac_local_faults);
5898
5899	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5900			  &prev_ps->mac_remote_faults,
5901			  &cur_ps->mac_remote_faults);
5902
5903	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5904			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5905
5906	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5907			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5908
5909	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5910			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5911
5912	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5913			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5914
5915	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5916			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5917
5918	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5919
5920	pf->stat_prev_loaded = true;
5921}
5922
5923/**
5924 * ice_get_stats64 - get statistics for network device structure
5925 * @netdev: network interface device structure
5926 * @stats: main device statistics structure
5927 */
5928static
5929void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5930{
5931	struct ice_netdev_priv *np = netdev_priv(netdev);
5932	struct rtnl_link_stats64 *vsi_stats;
5933	struct ice_vsi *vsi = np->vsi;
5934
5935	vsi_stats = &vsi->net_stats;
5936
5937	if (!vsi->num_txq || !vsi->num_rxq)
5938		return;
5939
5940	/* netdev packet/byte stats come from ring counters. These are obtained
5941	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
5942	 * But only call the update routine and read the registers if VSI is
5943	 * not down.
5944	 */
5945	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5946		ice_update_vsi_ring_stats(vsi);
5947	stats->tx_packets = vsi_stats->tx_packets;
5948	stats->tx_bytes = vsi_stats->tx_bytes;
5949	stats->rx_packets = vsi_stats->rx_packets;
5950	stats->rx_bytes = vsi_stats->rx_bytes;
5951
5952	/* The rest of the stats can be read from the hardware but instead we
5953	 * just return values that the watchdog task has already obtained from
5954	 * the hardware.
5955	 */
5956	stats->multicast = vsi_stats->multicast;
5957	stats->tx_errors = vsi_stats->tx_errors;
5958	stats->tx_dropped = vsi_stats->tx_dropped;
5959	stats->rx_errors = vsi_stats->rx_errors;
5960	stats->rx_dropped = vsi_stats->rx_dropped;
5961	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5962	stats->rx_length_errors = vsi_stats->rx_length_errors;
5963}
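/* Usage sketch (editorial, not driver code): ndo_get_stats64 backs the
 * standard per-interface counters, e.g. (interface name is a placeholder):
 *
 *	ip -s link show dev eth0
 *	cat /sys/class/net/eth0/statistics/rx_packets
 */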
5964
5965/**
5966 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5967 * @vsi: VSI having NAPI disabled
5968 */
5969static void ice_napi_disable_all(struct ice_vsi *vsi)
5970{
5971	int q_idx;
5972
5973	if (!vsi->netdev)
5974		return;
5975
5976	ice_for_each_q_vector(vsi, q_idx) {
5977		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5978
5979		if (q_vector->rx.ring || q_vector->tx.ring)
5980			napi_disable(&q_vector->napi);
5981
5982		cancel_work_sync(&q_vector->tx.dim.work);
5983		cancel_work_sync(&q_vector->rx.dim.work);
5984	}
5985}
5986
5987/**
5988 * ice_down - Shutdown the connection
5989 * @vsi: The VSI being stopped
5990 */
5991int ice_down(struct ice_vsi *vsi)
5992{
5993	int i, tx_err, rx_err, link_err = 0;
5994
5995	/* Caller of this function is expected to set the
5996	 * ICE_VSI_DOWN bit in vsi->state
5997	 */
5998	if (vsi->netdev) {
5999		netif_carrier_off(vsi->netdev);
6000		netif_tx_disable(vsi->netdev);
6001	}
6002
6003	ice_vsi_dis_irq(vsi);
6004
6005	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6006	if (tx_err)
6007		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
6008			   vsi->vsi_num, tx_err);
6009	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6010		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6011		if (tx_err)
6012			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
6013				   vsi->vsi_num, tx_err);
6014	}
6015
6016	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6017	if (rx_err)
6018		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
6019			   vsi->vsi_num, rx_err);
6020
6021	ice_napi_disable_all(vsi);
6022
6023	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6024		link_err = ice_force_phys_link_state(vsi, false);
6025		if (link_err)
6026			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6027				   vsi->vsi_num, link_err);
6028	}
6029
6030	ice_for_each_txq(vsi, i)
6031		ice_clean_tx_ring(vsi->tx_rings[i]);
6032
6033	ice_for_each_rxq(vsi, i)
6034		ice_clean_rx_ring(vsi->rx_rings[i]);
6035
6036	if (tx_err || rx_err || link_err) {
6037		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6038			   vsi->vsi_num, vsi->vsw->sw_id);
6039		return -EIO;
6040	}
6041
6042	return 0;
6043}
6044
6045/**
6046 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6047 * @vsi: VSI having resources allocated
6048 *
6049 * Return 0 on success, negative on failure
6050 */
6051int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6052{
6053	int i, err = 0;
6054
6055	if (!vsi->num_txq) {
6056		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6057			vsi->vsi_num);
6058		return -EINVAL;
6059	}
6060
6061	ice_for_each_txq(vsi, i) {
6062		struct ice_ring *ring = vsi->tx_rings[i];
6063
6064		if (!ring)
6065			return -EINVAL;
6066
6067		ring->netdev = vsi->netdev;
6068		err = ice_setup_tx_ring(ring);
6069		if (err)
6070			break;
6071	}
6072
6073	return err;
6074}
6075
6076/**
6077 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6078 * @vsi: VSI having resources allocated
6079 *
6080 * Return 0 on success, negative on failure
6081 */
6082int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6083{
6084	int i, err = 0;
6085
6086	if (!vsi->num_rxq) {
6087		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6088			vsi->vsi_num);
6089		return -EINVAL;
6090	}
6091
6092	ice_for_each_rxq(vsi, i) {
6093		struct ice_ring *ring = vsi->rx_rings[i];
6094
6095		if (!ring)
6096			return -EINVAL;
6097
6098		ring->netdev = vsi->netdev;
6099		err = ice_setup_rx_ring(ring);
6100		if (err)
6101			break;
6102	}
6103
6104	return err;
6105}
6106
6107/**
6108 * ice_vsi_open_ctrl - open control VSI for use
6109 * @vsi: the VSI to open
6110 *
6111 * Initialization of the Control VSI
6112 *
6113 * Returns 0 on success, negative value on error
6114 */
6115int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6116{
6117	char int_name[ICE_INT_NAME_STR_LEN];
6118	struct ice_pf *pf = vsi->back;
6119	struct device *dev;
6120	int err;
6121
6122	dev = ice_pf_to_dev(pf);
6123	/* allocate descriptors */
6124	err = ice_vsi_setup_tx_rings(vsi);
6125	if (err)
6126		goto err_setup_tx;
6127
6128	err = ice_vsi_setup_rx_rings(vsi);
6129	if (err)
6130		goto err_setup_rx;
6131
6132	err = ice_vsi_cfg(vsi);
6133	if (err)
6134		goto err_setup_rx;
6135
6136	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6137		 dev_driver_string(dev), dev_name(dev));
6138	err = ice_vsi_req_irq_msix(vsi, int_name);
6139	if (err)
6140		goto err_setup_rx;
6141
6142	ice_vsi_cfg_msix(vsi);
6143
6144	err = ice_vsi_start_all_rx_rings(vsi);
6145	if (err)
6146		goto err_up_complete;
6147
6148	clear_bit(ICE_VSI_DOWN, vsi->state);
6149	ice_vsi_ena_irq(vsi);
6150
6151	return 0;
6152
6153err_up_complete:
6154	ice_down(vsi);
6155err_setup_rx:
6156	ice_vsi_free_rx_rings(vsi);
6157err_setup_tx:
6158	ice_vsi_free_tx_rings(vsi);
6159
6160	return err;
6161}
6162
6163/**
6164 * ice_vsi_open - Called when a network interface is made active
6165 * @vsi: the VSI to open
6166 *
6167 * Initialization of the VSI
6168 *
6169 * Returns 0 on success, negative value on error
6170 */
6171static int ice_vsi_open(struct ice_vsi *vsi)
6172{
6173	char int_name[ICE_INT_NAME_STR_LEN];
6174	struct ice_pf *pf = vsi->back;
6175	int err;
6176
6177	/* allocate descriptors */
6178	err = ice_vsi_setup_tx_rings(vsi);
6179	if (err)
6180		goto err_setup_tx;
6181
6182	err = ice_vsi_setup_rx_rings(vsi);
6183	if (err)
6184		goto err_setup_rx;
6185
6186	err = ice_vsi_cfg(vsi);
6187	if (err)
6188		goto err_setup_rx;
6189
6190	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6191		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6192	err = ice_vsi_req_irq_msix(vsi, int_name);
6193	if (err)
6194		goto err_setup_rx;
6195
6196	/* Notify the stack of the actual queue counts. */
6197	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6198	if (err)
6199		goto err_set_qs;
6200
6201	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6202	if (err)
6203		goto err_set_qs;
6204
6205	err = ice_up_complete(vsi);
6206	if (err)
6207		goto err_up_complete;
6208
6209	return 0;
6210
6211err_up_complete:
6212	ice_down(vsi);
6213err_set_qs:
6214	ice_vsi_free_irq(vsi);
6215err_setup_rx:
6216	ice_vsi_free_rx_rings(vsi);
6217err_setup_tx:
6218	ice_vsi_free_tx_rings(vsi);
6219
6220	return err;
6221}
6222
6223/**
6224 * ice_vsi_release_all - Delete all VSIs
6225 * @pf: PF from which all VSIs are being removed
6226 */
6227static void ice_vsi_release_all(struct ice_pf *pf)
6228{
6229	int err, i;
6230
6231	if (!pf->vsi)
6232		return;
6233
6234	ice_for_each_vsi(pf, i) {
6235		if (!pf->vsi[i])
6236			continue;
6237
6238		err = ice_vsi_release(pf->vsi[i]);
6239		if (err)
6240			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6241				i, err, pf->vsi[i]->vsi_num);
6242	}
6243}
6244
6245/**
6246 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6247 * @pf: pointer to the PF instance
6248 * @type: VSI type to rebuild
6249 *
6250 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6251 */
6252static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6253{
6254	struct device *dev = ice_pf_to_dev(pf);
6255	enum ice_status status;
6256	int i, err;
6257
6258	ice_for_each_vsi(pf, i) {
6259		struct ice_vsi *vsi = pf->vsi[i];
6260
6261		if (!vsi || vsi->type != type)
6262			continue;
6263
6264		/* rebuild the VSI */
6265		err = ice_vsi_rebuild(vsi, true);
6266		if (err) {
6267			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6268				err, vsi->idx, ice_vsi_type_str(type));
6269			return err;
6270		}
6271
6272		/* replay filters for the VSI */
6273		status = ice_replay_vsi(&pf->hw, vsi->idx);
6274		if (status) {
6275			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6276				ice_stat_str(status), vsi->idx,
6277				ice_vsi_type_str(type));
6278			return -EIO;
6279		}
6280
6281		/* Re-map HW VSI number, using VSI handle that has been
6282		 * previously validated in ice_replay_vsi() call above
6283		 */
6284		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6285
6286		/* enable the VSI */
6287		err = ice_ena_vsi(vsi, false);
6288		if (err) {
6289			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6290				err, vsi->idx, ice_vsi_type_str(type));
6291			return err;
6292		}
6293
6294		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6295			 ice_vsi_type_str(type));
6296	}
6297
6298	return 0;
6299}
6300
6301/**
6302 * ice_update_pf_netdev_link - Update PF netdev link status
6303 * @pf: pointer to the PF instance
6304 */
6305static void ice_update_pf_netdev_link(struct ice_pf *pf)
6306{
6307	bool link_up;
6308	int i;
6309
6310	ice_for_each_vsi(pf, i) {
6311		struct ice_vsi *vsi = pf->vsi[i];
6312
6313		if (!vsi || vsi->type != ICE_VSI_PF)
6314			return;
6315
6316		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6317		if (link_up) {
6318			netif_carrier_on(pf->vsi[i]->netdev);
6319			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6320		} else {
6321			netif_carrier_off(pf->vsi[i]->netdev);
6322			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6323		}
6324	}
6325}
6326
6327/**
6328 * ice_rebuild - rebuild after reset
6329 * @pf: PF to rebuild
6330 * @reset_type: type of reset
6331 *
6332 * Do not rebuild VF VSI in this flow because that is already handled via
6333 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6334 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
6335 * to reset/rebuild all the VF VSIs twice.
6336 */
6337static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6338{
6339	struct device *dev = ice_pf_to_dev(pf);
6340	struct ice_hw *hw = &pf->hw;
6341	enum ice_status ret;
6342	int err;
6343
6344	if (test_bit(ICE_DOWN, pf->state))
6345		goto clear_recovery;
6346
6347	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6348
6349	ret = ice_init_all_ctrlq(hw);
6350	if (ret) {
6351		dev_err(dev, "control queues init failed %s\n",
6352			ice_stat_str(ret));
6353		goto err_init_ctrlq;
6354	}
6355
6356	/* if DDP was previously loaded successfully */
6357	if (!ice_is_safe_mode(pf)) {
6358		/* reload the SW DB of filter tables */
6359		if (reset_type == ICE_RESET_PFR)
6360			ice_fill_blk_tbls(hw);
6361		else
6362			/* Reload DDP Package after CORER/GLOBR reset */
6363			ice_load_pkg(NULL, pf);
6364	}
6365
6366	ret = ice_clear_pf_cfg(hw);
6367	if (ret) {
6368		dev_err(dev, "clear PF configuration failed %s\n",
6369			ice_stat_str(ret));
6370		goto err_init_ctrlq;
6371	}
6372
6373	if (pf->first_sw->dflt_vsi_ena)
6374		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6375	/* clear the default VSI configuration if it exists */
6376	pf->first_sw->dflt_vsi = NULL;
6377	pf->first_sw->dflt_vsi_ena = false;
6378
6379	ice_clear_pxe_mode(hw);
6380
6381	ret = ice_init_nvm(hw);
6382	if (ret) {
6383		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6384		goto err_init_ctrlq;
6385	}
6386
6387	ret = ice_get_caps(hw);
6388	if (ret) {
6389		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6390		goto err_init_ctrlq;
6391	}
6392
6393	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6394	if (ret) {
6395		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6396		goto err_init_ctrlq;
6397	}
6398
6399	err = ice_sched_init_port(hw->port_info);
6400	if (err)
6401		goto err_sched_init_port;
6402
6403	/* start misc vector */
6404	err = ice_req_irq_msix_misc(pf);
6405	if (err) {
6406		dev_err(dev, "misc vector setup failed: %d\n", err);
6407		goto err_sched_init_port;
6408	}
6409
6410	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6411		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6412		if (!rd32(hw, PFQF_FD_SIZE)) {
6413			u16 unused, guar, b_effort;
6414
6415			guar = hw->func_caps.fd_fltr_guar;
6416			b_effort = hw->func_caps.fd_fltr_best_effort;
6417
6418			/* force guaranteed filter pool for PF */
6419			ice_alloc_fd_guar_item(hw, &unused, guar);
6420			/* force shared filter pool for PF */
6421			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6422		}
6423	}
6424
6425	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6426		ice_dcb_rebuild(pf);
6427
6428	/* If the PF previously had PTP enabled, PTP init needs to happen
6429	 * before the VSI rebuild; otherwise, the PTP link status events
6430	 * will fail.
6431	 */
6432	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6433		ice_ptp_init(pf);
6434
6435	/* rebuild PF VSI */
6436	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6437	if (err) {
6438		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6439		goto err_vsi_rebuild;
6440	}
6441
6442	/* If Flow Director is active */
6443	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6444		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6445		if (err) {
6446			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6447			goto err_vsi_rebuild;
6448		}
6449
6450		/* replay HW Flow Director recipes */
6451		if (hw->fdir_prof)
6452			ice_fdir_replay_flows(hw);
6453
6454		/* replay Flow Director filters */
6455		ice_fdir_replay_fltrs(pf);
6456
6457		ice_rebuild_arfs(pf);
6458	}
6459
6460	ice_update_pf_netdev_link(pf);
6461
6462	/* tell the firmware we are up */
6463	ret = ice_send_version(pf);
6464	if (ret) {
6465		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6466			ice_stat_str(ret));
6467		goto err_vsi_rebuild;
6468	}
6469
6470	ice_replay_post(hw);
6471
6472	/* if we get here, reset flow is successful */
6473	clear_bit(ICE_RESET_FAILED, pf->state);
6474
6475	ice_plug_aux_dev(pf);
6476	return;
6477
6478err_vsi_rebuild:
6479err_sched_init_port:
6480	ice_sched_cleanup_all(hw);
6481err_init_ctrlq:
6482	ice_shutdown_all_ctrlq(hw);
6483	set_bit(ICE_RESET_FAILED, pf->state);
6484clear_recovery:
6485	/* set this bit in PF state to control service task scheduling */
6486	set_bit(ICE_NEEDS_RESTART, pf->state);
6487	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6488}
6489
6490/**
6491 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6492 * @vsi: Pointer to VSI structure
6493 */
6494static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6495{
6496	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6497		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6498	else
6499		return ICE_RXBUF_3072;
6500}
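/* Editorial worked example, assuming the usual XDP_PACKET_HEADROOM of 256
 * bytes and ICE_ETH_PKT_HDR_PAD of 26 bytes (ETH_HLEN + ETH_FCS_LEN + two
 * VLAN tags): on a 4K-page system without legacy-rx this returns 3072, so
 * ice_change_mtu() below permits an MTU of up to 3072 - 26 = 3046 while an
 * XDP program is loaded; with legacy-rx or 8K+ pages the limit drops to
 * 2048 - 256 - 26 = 1766.
 */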
6501
6502/**
6503 * ice_change_mtu - NDO callback to change the MTU
6504 * @netdev: network interface device structure
6505 * @new_mtu: new value for maximum frame size
6506 *
6507 * Returns 0 on success, negative on failure
6508 */
6509static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6510{
6511	struct ice_netdev_priv *np = netdev_priv(netdev);
6512	struct ice_vsi *vsi = np->vsi;
6513	struct ice_pf *pf = vsi->back;
6514	struct iidc_event *event;
6515	u8 count = 0;
6516	int err = 0;
6517
6518	if (new_mtu == (int)netdev->mtu) {
6519		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6520		return 0;
6521	}
6522
6523	if (ice_is_xdp_ena_vsi(vsi)) {
6524		int frame_size = ice_max_xdp_frame_size(vsi);
6525
6526		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6527			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6528				   frame_size - ICE_ETH_PKT_HDR_PAD);
6529			return -EINVAL;
6530		}
6531	}
6532
6533	/* if a reset is in progress, wait for some time for it to complete */
6534	do {
6535		if (ice_is_reset_in_progress(pf->state)) {
6536			count++;
6537			usleep_range(1000, 2000);
6538		} else {
6539			break;
6540		}
6541
6542	} while (count < 100);
6543
6544	if (count == 100) {
6545		netdev_err(netdev, "can't change MTU. Device is busy\n");
6546		return -EBUSY;
6547	}
6548
6549	event = kzalloc(sizeof(*event), GFP_KERNEL);
6550	if (!event)
6551		return -ENOMEM;
6552
6553	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6554	ice_send_event_to_aux(pf, event);
6555	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6556
6557	netdev->mtu = (unsigned int)new_mtu;
6558
6559	/* if VSI is up, bring it down and then back up */
6560	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6561		err = ice_down(vsi);
6562		if (err) {
6563			netdev_err(netdev, "change MTU if_down err %d\n", err);
6564			goto event_after;
6565		}
6566
6567		err = ice_up(vsi);
6568		if (err) {
6569			netdev_err(netdev, "change MTU if_up err %d\n", err);
6570			goto event_after;
6571		}
6572	}
6573
6574	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6575event_after:
6576	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6577	ice_send_event_to_aux(pf, event);
6578	kfree(event);
6579
6580	return err;
6581}
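/* Usage sketch (editorial, not driver code): MTU changes arrive here via
 * ndo_change_mtu; the interface name is a placeholder:
 *
 *	ip link set dev eth0 mtu 3000
 */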
6582
6583/**
6584 * ice_do_ioctl - Access the hwtstamp interface
6585 * @netdev: network interface device structure
6586 * @ifr: interface request data
6587 * @cmd: ioctl command
6588 */
6589static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6590{
6591	struct ice_netdev_priv *np = netdev_priv(netdev);
6592	struct ice_pf *pf = np->vsi->back;
6593
6594	switch (cmd) {
6595	case SIOCGHWTSTAMP:
6596		return ice_ptp_get_ts_config(pf, ifr);
6597	case SIOCSHWTSTAMP:
6598		return ice_ptp_set_ts_config(pf, ifr);
6599	default:
6600		return -EOPNOTSUPP;
6601	}
6602}
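/* Userspace sketch (editorial, not driver code): how SIOCSHWTSTAMP typically
 * reaches this handler. The interface name is a placeholder and error
 * handling is omitted; the linuxptp hwstamp_ctl utility wraps the same
 * ioctl.
 *
 *	#include <linux/net_tstamp.h>	// struct hwtstamp_config
 *	#include <linux/sockios.h>	// SIOCSHWTSTAMP
 *	#include <net/if.h>		// struct ifreq, IFNAMSIZ
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int enable_hw_timestamping(int sock)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_ALL,
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *
 *		// the core routes this to the driver's ioctl NDO
 *		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *	}
 */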
6603
6604/**
6605 * ice_aq_str - convert AQ err code to a string
6606 * @aq_err: the AQ error code to convert
6607 */
6608const char *ice_aq_str(enum ice_aq_err aq_err)
6609{
6610	switch (aq_err) {
6611	case ICE_AQ_RC_OK:
6612		return "OK";
6613	case ICE_AQ_RC_EPERM:
6614		return "ICE_AQ_RC_EPERM";
6615	case ICE_AQ_RC_ENOENT:
6616		return "ICE_AQ_RC_ENOENT";
6617	case ICE_AQ_RC_ENOMEM:
6618		return "ICE_AQ_RC_ENOMEM";
6619	case ICE_AQ_RC_EBUSY:
6620		return "ICE_AQ_RC_EBUSY";
6621	case ICE_AQ_RC_EEXIST:
6622		return "ICE_AQ_RC_EEXIST";
6623	case ICE_AQ_RC_EINVAL:
6624		return "ICE_AQ_RC_EINVAL";
6625	case ICE_AQ_RC_ENOSPC:
6626		return "ICE_AQ_RC_ENOSPC";
6627	case ICE_AQ_RC_ENOSYS:
6628		return "ICE_AQ_RC_ENOSYS";
6629	case ICE_AQ_RC_EMODE:
6630		return "ICE_AQ_RC_EMODE";
6631	case ICE_AQ_RC_ENOSEC:
6632		return "ICE_AQ_RC_ENOSEC";
6633	case ICE_AQ_RC_EBADSIG:
6634		return "ICE_AQ_RC_EBADSIG";
6635	case ICE_AQ_RC_ESVN:
6636		return "ICE_AQ_RC_ESVN";
6637	case ICE_AQ_RC_EBADMAN:
6638		return "ICE_AQ_RC_EBADMAN";
6639	case ICE_AQ_RC_EBADBUF:
6640		return "ICE_AQ_RC_EBADBUF";
6641	}
6642
6643	return "ICE_AQ_RC_UNKNOWN";
6644}
6645
6646/**
6647 * ice_stat_str - convert status err code to a string
6648 * @stat_err: the status error code to convert
6649 */
6650const char *ice_stat_str(enum ice_status stat_err)
6651{
6652	switch (stat_err) {
6653	case ICE_SUCCESS:
6654		return "OK";
6655	case ICE_ERR_PARAM:
6656		return "ICE_ERR_PARAM";
6657	case ICE_ERR_NOT_IMPL:
6658		return "ICE_ERR_NOT_IMPL";
6659	case ICE_ERR_NOT_READY:
6660		return "ICE_ERR_NOT_READY";
6661	case ICE_ERR_NOT_SUPPORTED:
6662		return "ICE_ERR_NOT_SUPPORTED";
6663	case ICE_ERR_BAD_PTR:
6664		return "ICE_ERR_BAD_PTR";
6665	case ICE_ERR_INVAL_SIZE:
6666		return "ICE_ERR_INVAL_SIZE";
6667	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6668		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6669	case ICE_ERR_RESET_FAILED:
6670		return "ICE_ERR_RESET_FAILED";
6671	case ICE_ERR_FW_API_VER:
6672		return "ICE_ERR_FW_API_VER";
6673	case ICE_ERR_NO_MEMORY:
6674		return "ICE_ERR_NO_MEMORY";
6675	case ICE_ERR_CFG:
6676		return "ICE_ERR_CFG";
6677	case ICE_ERR_OUT_OF_RANGE:
6678		return "ICE_ERR_OUT_OF_RANGE";
6679	case ICE_ERR_ALREADY_EXISTS:
6680		return "ICE_ERR_ALREADY_EXISTS";
6681	case ICE_ERR_NVM:
6682		return "ICE_ERR_NVM";
6683	case ICE_ERR_NVM_CHECKSUM:
6684		return "ICE_ERR_NVM_CHECKSUM";
6685	case ICE_ERR_BUF_TOO_SHORT:
6686		return "ICE_ERR_BUF_TOO_SHORT";
6687	case ICE_ERR_NVM_BLANK_MODE:
6688		return "ICE_ERR_NVM_BLANK_MODE";
6689	case ICE_ERR_IN_USE:
6690		return "ICE_ERR_IN_USE";
6691	case ICE_ERR_MAX_LIMIT:
6692		return "ICE_ERR_MAX_LIMIT";
6693	case ICE_ERR_RESET_ONGOING:
6694		return "ICE_ERR_RESET_ONGOING";
6695	case ICE_ERR_HW_TABLE:
6696		return "ICE_ERR_HW_TABLE";
6697	case ICE_ERR_DOES_NOT_EXIST:
6698		return "ICE_ERR_DOES_NOT_EXIST";
6699	case ICE_ERR_FW_DDP_MISMATCH:
6700		return "ICE_ERR_FW_DDP_MISMATCH";
6701	case ICE_ERR_AQ_ERROR:
6702		return "ICE_ERR_AQ_ERROR";
6703	case ICE_ERR_AQ_TIMEOUT:
6704		return "ICE_ERR_AQ_TIMEOUT";
6705	case ICE_ERR_AQ_FULL:
6706		return "ICE_ERR_AQ_FULL";
6707	case ICE_ERR_AQ_NO_WORK:
6708		return "ICE_ERR_AQ_NO_WORK";
6709	case ICE_ERR_AQ_EMPTY:
6710		return "ICE_ERR_AQ_EMPTY";
6711	case ICE_ERR_AQ_FW_CRITICAL:
6712		return "ICE_ERR_AQ_FW_CRITICAL";
6713	}
6714
6715	return "ICE_ERR_UNKNOWN";
6716}
6717
6718/**
6719 * ice_set_rss_lut - Set RSS LUT
6720 * @vsi: Pointer to VSI structure
6721 * @lut: Lookup table
6722 * @lut_size: Lookup table size
6723 *
6724 * Returns 0 on success, negative on failure
6725 */
6726int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6727{
6728	struct ice_aq_get_set_rss_lut_params params = {};
6729	struct ice_hw *hw = &vsi->back->hw;
6730	enum ice_status status;
6731
6732	if (!lut)
6733		return -EINVAL;
6734
6735	params.vsi_handle = vsi->idx;
6736	params.lut_size = lut_size;
6737	params.lut_type = vsi->rss_lut_type;
6738	params.lut = lut;
6739
6740	status = ice_aq_set_rss_lut(hw, &params);
6741	if (status) {
6742		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6743			ice_stat_str(status),
6744			ice_aq_str(hw->adminq.sq_last_status));
6745		return -EIO;
6746	}
6747
6748	return 0;
6749}
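
/* Illustrative sketch (an assumption, not driver code): callers such as the
 * ethtool rxfh path typically hand ice_set_rss_lut() a table that simply
 * spreads entries round-robin across the active Rx queues;
 * ice_fill_rss_lut() in ice_lib.c provides this default pattern.
 *
 *	static void fill_lut_round_robin(u8 *lut, u16 lut_size, u16 num_qs)
 *	{
 *		u16 i;
 *
 *		for (i = 0; i < lut_size; i++)
 *			lut[i] = i % num_qs;
 *	}
 */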
6750
6751/**
6752 * ice_set_rss_key - Set RSS key
6753 * @vsi: Pointer to the VSI structure
6754 * @seed: RSS hash seed
6755 *
6756 * Returns 0 on success, negative on failure
6757 */
6758int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6759{
6760	struct ice_hw *hw = &vsi->back->hw;
6761	enum ice_status status;
6762
6763	if (!seed)
6764		return -EINVAL;
6765
6766	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6767	if (status) {
6768		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6769			ice_stat_str(status),
6770			ice_aq_str(hw->adminq.sq_last_status));
6771		return -EIO;
6772	}
6773
6774	return 0;
6775}
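
/* Illustrative sketch: the seed passed to ice_set_rss_key() is an
 * ICE_VSIQF_HKEY_ARRAY_SIZE-byte buffer; absent a user-supplied key, a
 * caller would typically generate one with the stack helper, e.g.:
 *
 *	u8 *seed = kzalloc(ICE_VSIQF_HKEY_ARRAY_SIZE, GFP_KERNEL);
 *
 *	if (seed) {
 *		netdev_rss_key_fill(seed, ICE_VSIQF_HKEY_ARRAY_SIZE);
 *		err = ice_set_rss_key(vsi, seed);
 *		kfree(seed);
 *	}
 */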
6776
6777/**
6778 * ice_get_rss_lut - Get RSS LUT
6779 * @vsi: Pointer to VSI structure
6780 * @lut: Buffer to store the lookup table entries
6781 * @lut_size: Size of buffer to store the lookup table entries
6782 *
6783 * Returns 0 on success, negative on failure
6784 */
6785int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6786{
6787	struct ice_aq_get_set_rss_lut_params params = {};
6788	struct ice_hw *hw = &vsi->back->hw;
6789	enum ice_status status;
6790
6791	if (!lut)
6792		return -EINVAL;
6793
6794	params.vsi_handle = vsi->idx;
6795	params.lut_size = lut_size;
6796	params.lut_type = vsi->rss_lut_type;
6797	params.lut = lut;
6798
6799	status = ice_aq_get_rss_lut(hw, &params);
6800	if (status) {
6801		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6802			ice_stat_str(status),
6803			ice_aq_str(hw->adminq.sq_last_status));
6804		return -EIO;
6805	}
6806
6807	return 0;
6808}
6809
6810/**
6811 * ice_get_rss_key - Get RSS key
6812 * @vsi: Pointer to VSI structure
6813 * @seed: Buffer to store the key in
6814 *
6815 * Returns 0 on success, negative on failure
6816 */
6817int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6818{
6819	struct ice_hw *hw = &vsi->back->hw;
6820	enum ice_status status;
6821
6822	if (!seed)
6823		return -EINVAL;
6824
6825	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6826	if (status) {
6827		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6828			ice_stat_str(status),
6829			ice_aq_str(hw->adminq.sq_last_status));
6830		return -EIO;
6831	}
6832
6833	return 0;
6834}
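
/* Usage note (illustrative): the four RSS accessors above back the ethtool
 * rxfh operations, so the corresponding userspace entry points are, e.g.:
 *
 *	ethtool -x eth0          # show the RSS indirection table and key
 *	ethtool -X eth0 equal 8  # spread the table evenly over 8 queues
 */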
6835
6836/**
6837 * ice_bridge_getlink - Get the hardware bridge mode
6838 * @skb: skb buff
6839 * @pid: process ID
6840 * @seq: RTNL message seq
6841 * @dev: the netdev being configured
6842 * @filter_mask: filter mask passed in
6843 * @nlflags: netlink flags passed in
6844 *
6845 * Return the bridge mode (VEB/VEPA)
6846 */
6847static int
6848ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6849		   struct net_device *dev, u32 filter_mask, int nlflags)
6850{
6851	struct ice_netdev_priv *np = netdev_priv(dev);
6852	struct ice_vsi *vsi = np->vsi;
6853	struct ice_pf *pf = vsi->back;
6854	u16 bmode;
6855
6856	bmode = pf->first_sw->bridge_mode;
6857
6858	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6859				       filter_mask, NULL);
6860}
6861
6862/**
6863 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6864 * @vsi: Pointer to VSI structure
6865 * @bmode: Hardware bridge mode (VEB/VEPA)
6866 *
6867 * Returns 0 on success, negative on failure
6868 */
6869static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6870{
6871	struct ice_aqc_vsi_props *vsi_props;
6872	struct ice_hw *hw = &vsi->back->hw;
6873	struct ice_vsi_ctx *ctxt;
6874	enum ice_status status;
6875	int ret = 0;
6876
6877	vsi_props = &vsi->info;
6878
6879	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6880	if (!ctxt)
6881		return -ENOMEM;
6882
6883	ctxt->info = vsi->info;
6884
6885	if (bmode == BRIDGE_MODE_VEB)
6886		/* change from VEPA to VEB mode */
6887		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6888	else
6889		/* change from VEB to VEPA mode */
6890		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6891	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6892
6893	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6894	if (status) {
6895		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6896			bmode, ice_stat_str(status),
6897			ice_aq_str(hw->adminq.sq_last_status));
6898		ret = -EIO;
6899		goto out;
6900	}
6901	/* Update sw flags for bookkeeping */
6902	vsi_props->sw_flags = ctxt->info.sw_flags;
6903
6904out:
6905	kfree(ctxt);
6906	return ret;
6907}
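
/* Design note (illustrative): the update-VSI admin queue command only
 * applies the context sections flagged in valid_sections, so the
 * read-modify-write above changes nothing but the switch flags:
 *
 *	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
 */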
6908
6909/**
6910 * ice_bridge_setlink - Set the hardware bridge mode
6911 * @dev: the netdev being configured
6912 * @nlh: RTNL message
6913 * @flags: bridge setlink flags
6914 * @extack: netlink extended ack
6915 *
6916 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6917 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
6918 * not already set) for all VSIs connected to this switch, and also updates the
6919 * unicast switch filter rules for the corresponding switch of the netdev.
6920 */
6921static int
6922ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6923		   u16 __always_unused flags,
6924		   struct netlink_ext_ack __always_unused *extack)
6925{
6926	struct ice_netdev_priv *np = netdev_priv(dev);
6927	struct ice_pf *pf = np->vsi->back;
6928	struct nlattr *attr, *br_spec;
6929	struct ice_hw *hw = &pf->hw;
6930	enum ice_status status;
6931	struct ice_sw *pf_sw;
6932	int rem, v, err = 0;
6933
6934	pf_sw = pf->first_sw;
6935	/* find the attribute in the netlink message */
6936	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;
6937
6938	nla_for_each_nested(attr, br_spec, rem) {
6939		__u16 mode;
6940
6941		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6942			continue;
6943		mode = nla_get_u16(attr);
6944		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6945			return -EINVAL;
6946		/* Continue if bridge mode is not being flipped */
6947		if (mode == pf_sw->bridge_mode)
6948			continue;
6949		/* Iterate through the PF VSI list and update the loopback
6950		 * mode of each VSI
6951		 */
6952		ice_for_each_vsi(pf, v) {
6953			if (!pf->vsi[v])
6954				continue;
6955			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6956			if (err)
6957				return err;
6958		}
6959
6960		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6961		/* Update the unicast switch filter rules for the corresponding
6962		 * switch of the netdev
6963		 */
6964		status = ice_update_sw_rule_bridge_mode(hw);
6965		if (status) {
6966			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6967				   mode, ice_stat_str(status),
6968				   ice_aq_str(hw->adminq.sq_last_status));
6969			/* revert hw->evb_veb */
6970			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6971			return -EIO;
6972		}
6973
6974		pf_sw->bridge_mode = mode;
6975	}
6976
6977	return 0;
6978}
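
/* Usage note (illustrative): ice_bridge_getlink/ice_bridge_setlink are
 * driven from userspace through iproute2, e.g.:
 *
 *	bridge link show dev eth0            # query the VEB/VEPA mode
 *	bridge link set dev eth0 hwmode veb  # enable loopback (VEB) mode
 */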
6979
6980/**
6981 * ice_tx_timeout - Respond to a Tx Hang
6982 * @netdev: network interface device structure
6983 * @txqueue: Tx queue
6984 */
6985static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6986{
6987	struct ice_netdev_priv *np = netdev_priv(netdev);
6988	struct ice_ring *tx_ring = NULL;
6989	struct ice_vsi *vsi = np->vsi;
6990	struct ice_pf *pf = vsi->back;
6991	u32 i;
6992
6993	pf->tx_timeout_count++;
6994
6995	/* Check if PFC is enabled for the TC to which the queue belongs.
6996	 * If so, the Tx timeout is not caused by a hung queue and there is
6997	 * no need to reset and rebuild
6998	 */
6999	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7000		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7001			 txqueue);
7002		return;
7003	}
7004
7005	/* now that we have an index, find the tx_ring struct */
7006	for (i = 0; i < vsi->num_txq; i++)
7007		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7008			if (txqueue == vsi->tx_rings[i]->q_index) {
7009				tx_ring = vsi->tx_rings[i];
7010				break;
7011			}
7012
7013	/* Reset recovery level if enough time has elapsed after last timeout.
7014	 * Also ensure no new reset action happens before next timeout period.
7015	 */
7016	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7017		pf->tx_timeout_recovery_level = 1;
7018	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7019				       netdev->watchdog_timeo)))
7020		return;
7021
7022	if (tx_ring) {
7023		struct ice_hw *hw = &pf->hw;
7024		u32 head, val = 0;
7025
7026		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7027			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7028		/* Read interrupt register */
7029		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7030
7031		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7032			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7033			    head, tx_ring->next_to_use, val);
7034	}
7035
7036	pf->tx_timeout_last_recovery = jiffies;
7037	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7038		    pf->tx_timeout_recovery_level, txqueue);
7039
7040	switch (pf->tx_timeout_recovery_level) {
7041	case 1:
7042		set_bit(ICE_PFR_REQ, pf->state);
7043		break;
7044	case 2:
7045		set_bit(ICE_CORER_REQ, pf->state);
7046		break;
7047	case 3:
7048		set_bit(ICE_GLOBR_REQ, pf->state);
7049		break;
7050	default:
7051		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7052		set_bit(ICE_DOWN, pf->state);
7053		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7054		set_bit(ICE_SERVICE_DIS, pf->state);
7055		break;
7056	}
7057
7058	ice_service_task_schedule(pf);
7059	pf->tx_timeout_recovery_level++;
7060}
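
/* Background sketch (illustrative): the stack invokes .ndo_tx_timeout once a
 * stopped Tx queue exceeds netdev->watchdog_timeo (the core defaults this to
 * 5 * HZ when a driver leaves it unset), e.g.:
 *
 *	netdev->watchdog_timeo = 5 * HZ;
 *
 * Repeated timeouts inside the 20-second recovery window above therefore
 * escalate from PF reset through core reset to global reset.
 */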
7061
7062/**
7063 * ice_open - Called when a network interface becomes active
7064 * @netdev: network interface device structure
7065 *
7066 * The open entry point is called when a network interface is made
7067 * active by the system (IFF_UP). At this point all resources needed
7068 * for transmit and receive operations are allocated, the interrupt
7069 * handler is registered with the OS, the netdev watchdog is enabled,
7070 * and the stack is notified that the interface is ready.
7071 *
7072 * Returns 0 on success, negative value on failure
7073 */
7074int ice_open(struct net_device *netdev)
7075{
7076	struct ice_netdev_priv *np = netdev_priv(netdev);
7077	struct ice_pf *pf = np->vsi->back;
7078
7079	if (ice_is_reset_in_progress(pf->state)) {
7080		netdev_err(netdev, "can't open net device while reset is in progress\n");
7081		return -EBUSY;
7082	}
7083
7084	return ice_open_internal(netdev);
7085}
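
/* Usage note (illustrative): .ndo_open runs when the interface is brought
 * administratively up, e.g.:
 *
 *	ip link set dev eth0 up
 */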
7086
7087/**
7088 * ice_open_internal - Called when a network interface becomes active
7089 * @netdev: network interface device structure
7090 *
7091 * Internal ice_open implementation. Should not be called directly except by
7092 * ice_open and the reset handling routine.
7093 *
7094 * Returns 0 on success, negative value on failure
7095 */
7096int ice_open_internal(struct net_device *netdev)
7097{
7098	struct ice_netdev_priv *np = netdev_priv(netdev);
7099	struct ice_vsi *vsi = np->vsi;
7100	struct ice_pf *pf = vsi->back;
7101	struct ice_port_info *pi;
7102	enum ice_status status;
7103	int err;
7104
7105	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
7106		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
7107		return -EIO;
7108	}
7109
7110	netif_carrier_off(netdev);
7111
7112	pi = vsi->port_info;
7113	status = ice_update_link_info(pi);
7114	if (status) {
7115		netdev_err(netdev, "Failed to get link info, error %s\n",
7116			   ice_stat_str(status));
7117		return -EIO;
7118	}
7119
7120	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
7121
7122	/* Set PHY if there is media, otherwise, turn off PHY */
7123	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
7124		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7125		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
7126			err = ice_init_phy_user_cfg(pi);
7127			if (err) {
7128				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
7129					   err);
7130				return err;
7131			}
7132		}
7133
7134		err = ice_configure_phy(vsi);
7135		if (err) {
7136			netdev_err(netdev, "Failed to set physical link up, error %d\n",
7137				   err);
7138			return err;
7139		}
7140	} else {
7141		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7142		ice_set_link(vsi, false);
7143	}
7144
7145	err = ice_vsi_open(vsi);
7146	if (err)
7147		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
7148			   vsi->vsi_num, vsi->vsw->sw_id);
7149
7150	/* Update existing tunnels information */
7151	udp_tunnel_get_rx_info(netdev);
7152
7153	return err;
7154}
7155
7156/**
7157 * ice_stop - Disables a network interface
7158 * @netdev: network interface device structure
7159 *
7160 * The stop entry point is called when an interface is de-activated by the OS,
7161 * and the netdevice enters the DOWN state. The hardware is still under the
7162 * driver's control, but the netdev interface is disabled.
7163 *
7164 * Returns success only - not allowed to fail
7165 */
7166int ice_stop(struct net_device *netdev)
7167{
7168	struct ice_netdev_priv *np = netdev_priv(netdev);
7169	struct ice_vsi *vsi = np->vsi;
7170	struct ice_pf *pf = vsi->back;
7171
7172	if (ice_is_reset_in_progress(pf->state)) {
7173		netdev_err(netdev, "can't stop net device while reset is in progress\n");
7174		return -EBUSY;
7175	}
7176
7177	ice_vsi_close(vsi);
7178
7179	return 0;
7180}
7181
7182/**
7183 * ice_features_check - Validate encapsulated packet conforms to limits
7184 * @skb: skb buffer
7185 * @netdev: This port's netdev
7186 * @features: Offload features that the stack believes apply
7187 */
7188static netdev_features_t
7189ice_features_check(struct sk_buff *skb,
7190		   struct net_device __always_unused *netdev,
7191		   netdev_features_t features)
7192{
7193	size_t len;
7194
7195	/* No point in doing any of this if neither checksum nor GSO are
7196	 * being requested for this frame. We can rule out both by just
7197	 * checking for CHECKSUM_PARTIAL
7198	 */
7199	if (skb->ip_summed != CHECKSUM_PARTIAL)
7200		return features;
7201
7202	/* We cannot support GSO if the MSS is going to be less than
7203	 * 64 bytes. If it is then we need to drop support for GSO.
7204	 */
7205	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
7206		features &= ~NETIF_F_GSO_MASK;
7207
7208	len = skb_network_header(skb) - skb->data;
7209	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
7210		goto out_rm_features;
7211
7212	len = skb_transport_header(skb) - skb_network_header(skb);
7213	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7214		goto out_rm_features;
7215
7216	if (skb->encapsulation) {
7217		len = skb_inner_network_header(skb) - skb_transport_header(skb);
7218		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
7219			goto out_rm_features;
7220
7221		len = skb_inner_transport_header(skb) -
7222		      skb_inner_network_header(skb);
7223		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7224			goto out_rm_features;
7225	}
7226
7227	return features;
7228out_rm_features:
7229	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
7230}
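
/* Worked example (illustrative): for a VXLAN-encapsulated TCP frame the
 * checks above see an outer Ethernet header of 14 bytes (MACLEN), an outer
 * IPv4 header of 20 bytes (IPLEN), and outer UDP plus VXLAN of 8 + 8 = 16
 * bytes for the tunnel header, all even and within the descriptor limits,
 * so the offloads are kept; an odd or oversized header length instead
 * clears NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK for this skb.
 */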
7231
7232static const struct net_device_ops ice_netdev_safe_mode_ops = {
7233	.ndo_open = ice_open,
7234	.ndo_stop = ice_stop,
7235	.ndo_start_xmit = ice_start_xmit,
7236	.ndo_set_mac_address = ice_set_mac_address,
7237	.ndo_validate_addr = eth_validate_addr,
7238	.ndo_change_mtu = ice_change_mtu,
7239	.ndo_get_stats64 = ice_get_stats64,
7240	.ndo_tx_timeout = ice_tx_timeout,
7241	.ndo_bpf = ice_xdp_safe_mode,
7242};
7243
7244static const struct net_device_ops ice_netdev_ops = {
7245	.ndo_open = ice_open,
7246	.ndo_stop = ice_stop,
7247	.ndo_start_xmit = ice_start_xmit,
7248	.ndo_features_check = ice_features_check,
7249	.ndo_set_rx_mode = ice_set_rx_mode,
7250	.ndo_set_mac_address = ice_set_mac_address,
7251	.ndo_validate_addr = eth_validate_addr,
7252	.ndo_change_mtu = ice_change_mtu,
7253	.ndo_get_stats64 = ice_get_stats64,
7254	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
7255	.ndo_do_ioctl = ice_do_ioctl,
7256	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
7257	.ndo_set_vf_mac = ice_set_vf_mac,
7258	.ndo_get_vf_config = ice_get_vf_cfg,
7259	.ndo_set_vf_trust = ice_set_vf_trust,
7260	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
7261	.ndo_set_vf_link_state = ice_set_vf_link_state,
7262	.ndo_get_vf_stats = ice_get_vf_stats,
7263	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
7264	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
7265	.ndo_set_features = ice_set_features,
7266	.ndo_bridge_getlink = ice_bridge_getlink,
7267	.ndo_bridge_setlink = ice_bridge_setlink,
7268	.ndo_fdb_add = ice_fdb_add,
7269	.ndo_fdb_del = ice_fdb_del,
7270#ifdef CONFIG_RFS_ACCEL
7271	.ndo_rx_flow_steer = ice_rx_flow_steer,
7272#endif
7273	.ndo_tx_timeout = ice_tx_timeout,
7274	.ndo_bpf = ice_xdp,
7275	.ndo_xdp_xmit = ice_xdp_xmit,
7276	.ndo_xsk_wakeup = ice_xsk_wakeup,
7277};