   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018-2023, Intel Corporation. */
   3
   4/* Intel(R) Ethernet Connection E800 Series Linux Driver */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <generated/utsrelease.h>
   9#include <linux/crash_dump.h>
  10#include "ice.h"
  11#include "ice_base.h"
  12#include "ice_lib.h"
  13#include "ice_fltr.h"
  14#include "ice_dcb_lib.h"
  15#include "ice_dcb_nl.h"
  16#include "devlink/devlink.h"
  17#include "devlink/devlink_port.h"
  18#include "ice_sf_eth.h"
  19#include "ice_hwmon.h"
  20/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
  21 * ice tracepoint functions. This must be done exactly once across the
  22 * ice driver.
  23 */
  24#define CREATE_TRACE_POINTS
  25#include "ice_trace.h"
  26#include "ice_eswitch.h"
  27#include "ice_tc_lib.h"
  28#include "ice_vsi_vlan_ops.h"
  29#include <net/xdp_sock_drv.h>
  30
  31#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
  32static const char ice_driver_string[] = DRV_SUMMARY;
  33static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
  34
  35/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
  36#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
  37#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
  38
  39MODULE_DESCRIPTION(DRV_SUMMARY);
  40MODULE_IMPORT_NS("LIBIE");
  41MODULE_LICENSE("GPL v2");
  42MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
  43
  44static int debug = -1;
  45module_param(debug, int, 0644);
  46#ifndef CONFIG_DYNAMIC_DEBUG
  47MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
  48#else
  49MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
  50#endif /* !CONFIG_DYNAMIC_DEBUG */
  51
  52DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
  53EXPORT_SYMBOL(ice_xdp_locking_key);
  54
  55/**
  56 * ice_hw_to_dev - Get device pointer from the hardware structure
  57 * @hw: pointer to the device HW structure
  58 *
  59 * Used to access the device pointer from compilation units which can't easily
  60 * include the definition of struct ice_pf without leading to circular header
  61 * dependencies.
  62 */
  63struct device *ice_hw_to_dev(struct ice_hw *hw)
  64{
  65	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
  66
  67	return &pf->pdev->dev;
  68}
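
/* Editor's sketch: container_of() is what makes the lookup above work. It
 * recovers a pointer to the wrapper structure from a pointer to one of its
 * embedded members by subtracting the member's offset:
 *
 *	struct ice_pf {
 *		...
 *		struct ice_hw hw;	(embedded by value, not a pointer)
 *		...
 *	};
 *
 *	pf = container_of(hw, struct ice_pf, hw);
 *	   = (struct ice_pf *)((char *)hw - offsetof(struct ice_pf, hw));
 *
 * This is valid only because struct ice_hw is embedded directly inside
 * struct ice_pf.
 */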
  69
  70static struct workqueue_struct *ice_wq;
  71struct workqueue_struct *ice_lag_wq;
  72static const struct net_device_ops ice_netdev_safe_mode_ops;
  73static const struct net_device_ops ice_netdev_ops;
  74
  75static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
  76
  77static void ice_vsi_release_all(struct ice_pf *pf);
  78
  79static int ice_rebuild_channels(struct ice_pf *pf);
  80static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
  81
  82static int
  83ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
  84		     void *cb_priv, enum tc_setup_type type, void *type_data,
  85		     void *data,
  86		     void (*cleanup)(struct flow_block_cb *block_cb));
  87
  88bool netif_is_ice(const struct net_device *dev)
  89{
  90	return dev && (dev->netdev_ops == &ice_netdev_ops ||
  91		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
  92}
  93
  94/**
  95 * ice_get_tx_pending - returns number of Tx descriptors not processed
  96 * @ring: the ring of descriptors
  97 */
  98static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
  99{
 100	u16 head, tail;
 101
 102	head = ring->next_to_clean;
 103	tail = ring->next_to_use;
 104
 105	if (head != tail)
 106		return (head < tail) ?
 107			tail - head : (tail + ring->count - head);
 108	return 0;
 109}
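
/* Worked example for the wraparound branch above: with ring->count = 256,
 * head (next_to_clean) = 250 and tail (next_to_use) = 10, head > tail, so
 * pending = tail + count - head = 10 + 256 - 250 = 16 descriptors are still
 * outstanding.
 */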
 110
 111/**
 112 * ice_check_for_hang_subtask - check for and recover hung queues
 113 * @pf: pointer to PF struct
 114 */
 115static void ice_check_for_hang_subtask(struct ice_pf *pf)
 116{
 117	struct ice_vsi *vsi = NULL;
 118	struct ice_hw *hw;
 119	unsigned int i;
 120	int packets;
 121	u32 v;
 122
 123	ice_for_each_vsi(pf, v)
 124		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
 125			vsi = pf->vsi[v];
 126			break;
 127		}
 128
 129	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
 130		return;
 131
 132	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
 133		return;
 134
 135	hw = &vsi->back->hw;
 136
 137	ice_for_each_txq(vsi, i) {
 138		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
 139		struct ice_ring_stats *ring_stats;
 140
 141		if (!tx_ring)
 142			continue;
 143		if (ice_ring_ch_enabled(tx_ring))
 144			continue;
 145
 146		ring_stats = tx_ring->ring_stats;
 147		if (!ring_stats)
 148			continue;
 149
 150		if (tx_ring->desc) {
 151			/* If packet counter has not changed the queue is
 152			 * likely stalled, so force an interrupt for this
 153			 * queue.
 154			 *
 155			 * prev_pkt would be negative if there was no
 156			 * pending work.
 157			 */
 158			packets = ring_stats->stats.pkts & INT_MAX;
 159			if (ring_stats->tx_stats.prev_pkt == packets) {
 160				/* Trigger sw interrupt to revive the queue */
 161				ice_trigger_sw_intr(hw, tx_ring->q_vector);
 162				continue;
 163			}
 164
 165			/* Memory barrier between read of packet count and call
 166			 * to ice_get_tx_pending()
 167			 */
 168			smp_rmb();
 169			ring_stats->tx_stats.prev_pkt =
 170			    ice_get_tx_pending(tx_ring) ? packets : -1;
 171		}
 172	}
 173}
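
/* Note on the bookkeeping above: prev_pkt is written as -1 whenever the ring
 * has no pending descriptors, so an idle queue can never match the stored
 * packet count on the next pass; masking with INT_MAX keeps the snapshot
 * non-negative so it cannot collide with the -1 sentinel.
 */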
 174
 175/**
 176 * ice_init_mac_fltr - Set initial MAC filters
 177 * @pf: board private structure
 178 *
  179 * Set the initial MAC filters for the PF VSI; configure filters for the
  180 * permanent address and the broadcast address. If an error is encountered,
  181 * the netdevice will be unregistered.
 182 */
 183static int ice_init_mac_fltr(struct ice_pf *pf)
 184{
 185	struct ice_vsi *vsi;
 186	u8 *perm_addr;
 187
 188	vsi = ice_get_main_vsi(pf);
 189	if (!vsi)
 190		return -EINVAL;
 191
 192	perm_addr = vsi->port_info->mac.perm_addr;
 193	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
 194}
 195
 196/**
 197 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 198 * @netdev: the net device on which the sync is happening
 199 * @addr: MAC address to sync
 200 *
  201 * This is a callback function which is called by the in-kernel device sync
  202 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
  203 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
  204 * MAC filters to the hardware.
 205 */
 206static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
 207{
 208	struct ice_netdev_priv *np = netdev_priv(netdev);
 209	struct ice_vsi *vsi = np->vsi;
 210
 211	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
 212				     ICE_FWD_TO_VSI))
 213		return -EINVAL;
 214
 215	return 0;
 216}
 217
 218/**
 219 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 220 * @netdev: the net device on which the unsync is happening
 221 * @addr: MAC address to unsync
 222 *
  223 * This is a callback function which is called by the in-kernel device unsync
 224 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 225 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 226 * delete the MAC filters from the hardware.
 227 */
 228static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 229{
 230	struct ice_netdev_priv *np = netdev_priv(netdev);
 231	struct ice_vsi *vsi = np->vsi;
 232
 233	/* Under some circumstances, we might receive a request to delete our
 234	 * own device address from our uc list. Because we store the device
 235	 * address in the VSI's MAC filter list, we need to ignore such
 236	 * requests and not delete our device address from this list.
 237	 */
 238	if (ether_addr_equal(addr, netdev->dev_addr))
 239		return 0;
 240
 241	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
 242				     ICE_FWD_TO_VSI))
 243		return -EINVAL;
 244
 245	return 0;
 246}
 247
 248/**
 249 * ice_vsi_fltr_changed - check if filter state changed
 250 * @vsi: VSI to be checked
 251 *
 252 * returns true if filter state has changed, false otherwise.
 253 */
 254static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 255{
 256	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
 257	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 258}
 259
 260/**
 261 * ice_set_promisc - Enable promiscuous mode for a given PF
 262 * @vsi: the VSI being configured
 263 * @promisc_m: mask of promiscuous config bits
 264 *
 265 */
 266static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
 267{
 268	int status;
 269
 270	if (vsi->type != ICE_VSI_PF)
 271		return 0;
 272
 273	if (ice_vsi_has_non_zero_vlans(vsi)) {
 274		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
 275		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
 276						       promisc_m);
 277	} else {
 278		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
 279						  promisc_m, 0);
 280	}
 281	if (status && status != -EEXIST)
 282		return status;
 283
 284	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
 285		   vsi->vsi_num, promisc_m);
 286	return 0;
 287}
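
/* Usage sketch: callers hand in a mask such as ICE_MCAST_PROMISC_BITS, e.g.
 *
 *	err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 *
 * as done from ice_vsi_sync_fltr() below; when the VSI carries non-zero
 * VLANs, the VLAN Rx/Tx promiscuous bits are OR'd in automatically.
 */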
 288
 289/**
 290 * ice_clear_promisc - Disable promiscuous mode for a given PF
 291 * @vsi: the VSI being configured
 292 * @promisc_m: mask of promiscuous config bits
 293 *
 294 */
 295static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
 296{
 297	int status;
 298
 299	if (vsi->type != ICE_VSI_PF)
 300		return 0;
 301
 302	if (ice_vsi_has_non_zero_vlans(vsi)) {
 303		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
 304		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
 305							 promisc_m);
 306	} else {
 307		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
 308						    promisc_m, 0);
 309	}
 310
 311	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
 312		   vsi->vsi_num, promisc_m);
 313	return status;
 314}
 315
 316/**
 317 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 318 * @vsi: ptr to the VSI
 319 *
 320 * Push any outstanding VSI filter changes through the AdminQ.
 321 */
 322static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 323{
 324	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 325	struct device *dev = ice_pf_to_dev(vsi->back);
 326	struct net_device *netdev = vsi->netdev;
 327	bool promisc_forced_on = false;
 328	struct ice_pf *pf = vsi->back;
 329	struct ice_hw *hw = &pf->hw;
 330	u32 changed_flags = 0;
 331	int err;
 332
 333	if (!vsi->netdev)
 334		return -EINVAL;
 335
 336	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
 337		usleep_range(1000, 2000);
 338
 339	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
 340	vsi->current_netdev_flags = vsi->netdev->flags;
 341
 342	INIT_LIST_HEAD(&vsi->tmp_sync_list);
 343	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
 344
 345	if (ice_vsi_fltr_changed(vsi)) {
 346		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 347		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 348
 349		/* grab the netdev's addr_list_lock */
 350		netif_addr_lock_bh(netdev);
 351		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 352			      ice_add_mac_to_unsync_list);
 353		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 354			      ice_add_mac_to_unsync_list);
 355		/* our temp lists are populated. release lock */
 356		netif_addr_unlock_bh(netdev);
 357	}
 358
 359	/* Remove MAC addresses in the unsync list */
 360	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
 361	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
 362	if (err) {
 363		netdev_err(netdev, "Failed to delete MAC filters\n");
 364		/* if we failed because of alloc failures, just bail */
 365		if (err == -ENOMEM)
 366			goto out;
 367	}
 368
 369	/* Add MAC addresses in the sync list */
 370	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
 371	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
  372	/* If the filter was added successfully or already exists, don't report
  373	 * it as an error. Instead continue processing the rest of the
  374	 * function.
  375	 */
 376	if (err && err != -EEXIST) {
 377		netdev_err(netdev, "Failed to add MAC filters\n");
 378		/* If there is no more space for new umac filters, VSI
 379		 * should go into promiscuous mode. There should be some
 380		 * space reserved for promiscuous filters.
 381		 */
 382		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
 383		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
 384				      vsi->state)) {
 385			promisc_forced_on = true;
 386			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
 387				    vsi->vsi_num);
 388		} else {
 389			goto out;
 390		}
 391	}
 392	err = 0;
 393	/* check for changes in promiscuous modes */
 394	if (changed_flags & IFF_ALLMULTI) {
 395		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
 396			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 397			if (err) {
 398				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
 399				goto out_promisc;
 400			}
 401		} else {
 402			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
 403			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 404			if (err) {
 405				vsi->current_netdev_flags |= IFF_ALLMULTI;
 406				goto out_promisc;
 407			}
 408		}
 409	}
 410
 411	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
 412	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
 413		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 414		if (vsi->current_netdev_flags & IFF_PROMISC) {
 415			/* Apply Rx filter rule to get traffic from wire */
 416			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
 417				err = ice_set_dflt_vsi(vsi);
 418				if (err && err != -EEXIST) {
 419					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
 420						   err, vsi->vsi_num);
 421					vsi->current_netdev_flags &=
 422						~IFF_PROMISC;
 423					goto out_promisc;
 424				}
 425				err = 0;
 426				vlan_ops->dis_rx_filtering(vsi);
 427
 428				/* promiscuous mode implies allmulticast so
 429				 * that VSIs that are in promiscuous mode are
 430				 * subscribed to multicast packets coming to
 431				 * the port
 432				 */
 433				err = ice_set_promisc(vsi,
 434						      ICE_MCAST_PROMISC_BITS);
 435				if (err)
 436					goto out_promisc;
 437			}
 438		} else {
 439			/* Clear Rx filter to remove traffic from wire */
 440			if (ice_is_vsi_dflt_vsi(vsi)) {
 441				err = ice_clear_dflt_vsi(vsi);
 442				if (err) {
 443					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
 444						   err, vsi->vsi_num);
 445					vsi->current_netdev_flags |=
 446						IFF_PROMISC;
 447					goto out_promisc;
 448				}
 449				if (vsi->netdev->features &
 450				    NETIF_F_HW_VLAN_CTAG_FILTER)
 451					vlan_ops->ena_rx_filtering(vsi);
 452			}
 453
 454			/* disable allmulti here, but only if allmulti is not
 455			 * still enabled for the netdev
 456			 */
 457			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
 458				err = ice_clear_promisc(vsi,
 459							ICE_MCAST_PROMISC_BITS);
 460				if (err) {
 461					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
 462						   err, vsi->vsi_num);
 463				}
 464			}
 465		}
 466	}
 467	goto exit;
 468
 469out_promisc:
 470	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 471	goto exit;
 472out:
 473	/* if something went wrong then set the changed flag so we try again */
 474	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 475	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 476exit:
 477	clear_bit(ICE_CFG_BUSY, vsi->state);
 478	return err;
 479}
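
/* Error-path summary for ice_vsi_sync_fltr() (editor's note): the 'out'
 * label re-arms the UMAC/MMAC changed bits so a later filter-sync pass (see
 * ice_sync_fltr_subtask() below) will retry; 'out_promisc' re-arms only
 * ICE_VSI_PROMISC_CHANGED; both fall through to 'exit', which releases
 * ICE_CFG_BUSY.
 */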
 480
 481/**
 482 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 483 * @pf: board private structure
 484 */
 485static void ice_sync_fltr_subtask(struct ice_pf *pf)
 486{
 487	int v;
 488
 489	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
 490		return;
 491
 492	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 493
 494	ice_for_each_vsi(pf, v)
 495		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
 496		    ice_vsi_sync_fltr(pf->vsi[v])) {
 497			/* come back and try again later */
 498			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 499			break;
 500		}
 501}
 502
 503/**
 504 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 505 * @pf: the PF
 506 * @locked: is the rtnl_lock already held
 507 */
 508static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 509{
 510	int node;
 511	int v;
 512
 513	ice_for_each_vsi(pf, v)
 514		if (pf->vsi[v])
 515			ice_dis_vsi(pf->vsi[v], locked);
 516
 517	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
 518		pf->pf_agg_node[node].num_vsis = 0;
 519
 520	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
 521		pf->vf_agg_node[node].num_vsis = 0;
 522}
 523
 524/**
 525 * ice_prepare_for_reset - prep for reset
 526 * @pf: board private structure
 527 * @reset_type: reset type requested
 528 *
 529 * Inform or close all dependent features in prep for reset.
 530 */
 531static void
 532ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 533{
 534	struct ice_hw *hw = &pf->hw;
 535	struct ice_vsi *vsi;
 536	struct ice_vf *vf;
 537	unsigned int bkt;
 538
 539	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
 540
 541	/* already prepared for reset */
 542	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
 543		return;
 544
 545	synchronize_irq(pf->oicr_irq.virq);
 546
 547	ice_unplug_aux_dev(pf);
 548
 549	/* Notify VFs of impending reset */
 550	if (ice_check_sq_alive(hw, &hw->mailboxq))
 551		ice_vc_notify_reset(pf);
 552
 553	/* Disable VFs until reset is completed */
 554	mutex_lock(&pf->vfs.table_lock);
 555	ice_for_each_vf(pf, bkt, vf)
 556		ice_set_vf_state_dis(vf);
 557	mutex_unlock(&pf->vfs.table_lock);
 558
 559	if (ice_is_eswitch_mode_switchdev(pf)) {
 560		rtnl_lock();
 561		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
 562		rtnl_unlock();
 563	}
 564
 565	/* release ADQ specific HW and SW resources */
 566	vsi = ice_get_main_vsi(pf);
 567	if (!vsi)
 568		goto skip;
 569
  570	/* to be on the safe side, reset orig_rss_size so that the normal flow
  571	 * of deciding rss_size can take precedence
  572	 */
 573	vsi->orig_rss_size = 0;
 574
 575	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
 576		if (reset_type == ICE_RESET_PFR) {
 577			vsi->old_ena_tc = vsi->all_enatc;
 578			vsi->old_numtc = vsi->all_numtc;
 579		} else {
 580			ice_remove_q_channels(vsi, true);
 581
  582			/* other reset types do not support channel rebuild,
  583			 * hence reset the related info
  584			 */
 585			vsi->old_ena_tc = 0;
 586			vsi->all_enatc = 0;
 587			vsi->old_numtc = 0;
 588			vsi->all_numtc = 0;
 589			vsi->req_txq = 0;
 590			vsi->req_rxq = 0;
 591			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
 592			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
 593		}
 594	}
 595
 596	if (vsi->netdev)
 597		netif_device_detach(vsi->netdev);
 598skip:
 599
 600	/* clear SW filtering DB */
 601	ice_clear_hw_tbls(hw);
 602	/* disable the VSIs and their queues that are not already DOWN */
 603	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
 604	ice_pf_dis_all_vsi(pf, false);
 605
 606	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
 607		ice_ptp_prepare_for_reset(pf, reset_type);
 608
 609	if (ice_is_feature_supported(pf, ICE_F_GNSS))
 610		ice_gnss_exit(pf);
 611
 612	if (hw->port_info)
 613		ice_sched_clear_port(hw->port_info);
 614
 615	ice_shutdown_all_ctrlq(hw, false);
 616
 617	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
 618}
 619
 620/**
 621 * ice_do_reset - Initiate one of many types of resets
 622 * @pf: board private structure
 623 * @reset_type: reset type requested before this function was called.
 624 */
 625static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 626{
 627	struct device *dev = ice_pf_to_dev(pf);
 628	struct ice_hw *hw = &pf->hw;
 629
 630	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
 631
 632	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
 633		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
 634		reset_type = ICE_RESET_CORER;
 635	}
 636
 637	ice_prepare_for_reset(pf, reset_type);
 638
 639	/* trigger the reset */
 640	if (ice_reset(hw, reset_type)) {
 641		dev_err(dev, "reset %d failed\n", reset_type);
 642		set_bit(ICE_RESET_FAILED, pf->state);
 643		clear_bit(ICE_RESET_OICR_RECV, pf->state);
 644		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 645		clear_bit(ICE_PFR_REQ, pf->state);
 646		clear_bit(ICE_CORER_REQ, pf->state);
 647		clear_bit(ICE_GLOBR_REQ, pf->state);
 648		wake_up(&pf->reset_wait_queue);
 649		return;
 650	}
 651
 652	/* PFR is a bit of a special case because it doesn't result in an OICR
 653	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
 654	 * associated state bits.
 655	 */
 656	if (reset_type == ICE_RESET_PFR) {
 657		pf->pfr_count++;
 658		ice_rebuild(pf, reset_type);
 659		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 660		clear_bit(ICE_PFR_REQ, pf->state);
 661		wake_up(&pf->reset_wait_queue);
 662		ice_reset_all_vfs(pf);
 663	}
 664}
 665
 666/**
 667 * ice_reset_subtask - Set up for resetting the device and driver
 668 * @pf: board private structure
 669 */
 670static void ice_reset_subtask(struct ice_pf *pf)
 671{
 672	enum ice_reset_req reset_type = ICE_RESET_INVAL;
 673
  674	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
  675	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
  676	 * of reset is pending and sets bits in pf->state indicating the reset
  677	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
  678	 * for the pending reset if not already prepared (for PF software-
  679	 * initiated global resets the software should already be prepared, as
  680	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated by
  681	 * firmware or software on other PFs, that bit is not set, so prepare
  682	 * now), then poll for reset completion, rebuild, and return.
  683	 */
 684	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
 685		/* Perform the largest reset requested */
 686		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
 687			reset_type = ICE_RESET_CORER;
 688		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
 689			reset_type = ICE_RESET_GLOBR;
 690		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
 691			reset_type = ICE_RESET_EMPR;
 692		/* return if no valid reset type requested */
 693		if (reset_type == ICE_RESET_INVAL)
 694			return;
 695		ice_prepare_for_reset(pf, reset_type);
 696
 697		/* make sure we are ready to rebuild */
 698		if (ice_check_reset(&pf->hw)) {
 699			set_bit(ICE_RESET_FAILED, pf->state);
 700		} else {
 701			/* done with reset. start rebuild */
 702			pf->hw.reset_ongoing = false;
 703			ice_rebuild(pf, reset_type);
 704			/* clear bit to resume normal operations, but
 705			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
 706			 */
 707			clear_bit(ICE_RESET_OICR_RECV, pf->state);
 708			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 709			clear_bit(ICE_PFR_REQ, pf->state);
 710			clear_bit(ICE_CORER_REQ, pf->state);
 711			clear_bit(ICE_GLOBR_REQ, pf->state);
 712			wake_up(&pf->reset_wait_queue);
 713			ice_reset_all_vfs(pf);
 714		}
 715
 716		return;
 717	}
 718
 719	/* No pending resets to finish processing. Check for new resets */
 720	if (test_bit(ICE_PFR_REQ, pf->state)) {
 721		reset_type = ICE_RESET_PFR;
 722		if (pf->lag && pf->lag->bonded) {
 723			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
 724			reset_type = ICE_RESET_CORER;
 725		}
 726	}
 727	if (test_bit(ICE_CORER_REQ, pf->state))
 728		reset_type = ICE_RESET_CORER;
 729	if (test_bit(ICE_GLOBR_REQ, pf->state))
 730		reset_type = ICE_RESET_GLOBR;
 731	/* If no valid reset type requested just return */
 732	if (reset_type == ICE_RESET_INVAL)
 733		return;
 734
 735	/* reset if not already down or busy */
 736	if (!test_bit(ICE_DOWN, pf->state) &&
 737	    !test_bit(ICE_CFG_BUSY, pf->state)) {
 738		ice_do_reset(pf, reset_type);
 739	}
 740}
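
/* Reset flow at a glance (editor's summary of the paths above):
 *
 *	OICR handler:      sets ICE_RESET_OICR_RECV plus a *_RECV bit
 *	ice_reset_subtask: ice_prepare_for_reset() -> ice_check_reset() ->
 *	                   ice_rebuild() -> clear the *_RECV/*_REQ bits
 *	ice_do_reset:      driver-initiated path; PFR also rebuilds inline
 *	                   because no OICR interrupt follows a PFR
 */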
 741
 742/**
 743 * ice_print_topo_conflict - print topology conflict message
 744 * @vsi: the VSI whose topology status is being checked
 745 */
 746static void ice_print_topo_conflict(struct ice_vsi *vsi)
 747{
 748	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
 749	case ICE_AQ_LINK_TOPO_CONFLICT:
 750	case ICE_AQ_LINK_MEDIA_CONFLICT:
 751	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
 752	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
 753	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
  754		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
 755		break;
 756	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
 757		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
 758			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
 759		else
 760			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
 761		break;
 762	default:
 763		break;
 764	}
 765}
 766
 767/**
 768 * ice_print_link_msg - print link up or down message
 769 * @vsi: the VSI whose link status is being queried
 770 * @isup: boolean for if the link is now up or down
 771 */
 772void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 773{
 774	struct ice_aqc_get_phy_caps_data *caps;
 775	const char *an_advertised;
 776	const char *fec_req;
 777	const char *speed;
 778	const char *fec;
 779	const char *fc;
 780	const char *an;
 781	int status;
 782
 783	if (!vsi)
 784		return;
 785
 786	if (vsi->current_isup == isup)
 787		return;
 788
 789	vsi->current_isup = isup;
 790
 791	if (!isup) {
 792		netdev_info(vsi->netdev, "NIC Link is Down\n");
 793		return;
 794	}
 795
 796	switch (vsi->port_info->phy.link_info.link_speed) {
 797	case ICE_AQ_LINK_SPEED_200GB:
 798		speed = "200 G";
 799		break;
 800	case ICE_AQ_LINK_SPEED_100GB:
 801		speed = "100 G";
 802		break;
 803	case ICE_AQ_LINK_SPEED_50GB:
 804		speed = "50 G";
 805		break;
 806	case ICE_AQ_LINK_SPEED_40GB:
 807		speed = "40 G";
 808		break;
 809	case ICE_AQ_LINK_SPEED_25GB:
 810		speed = "25 G";
 811		break;
 812	case ICE_AQ_LINK_SPEED_20GB:
 813		speed = "20 G";
 814		break;
 815	case ICE_AQ_LINK_SPEED_10GB:
 816		speed = "10 G";
 817		break;
 818	case ICE_AQ_LINK_SPEED_5GB:
 819		speed = "5 G";
 820		break;
 821	case ICE_AQ_LINK_SPEED_2500MB:
 822		speed = "2.5 G";
 823		break;
 824	case ICE_AQ_LINK_SPEED_1000MB:
 825		speed = "1 G";
 826		break;
 827	case ICE_AQ_LINK_SPEED_100MB:
 828		speed = "100 M";
 829		break;
 830	default:
 831		speed = "Unknown ";
 832		break;
 833	}
 834
 835	switch (vsi->port_info->fc.current_mode) {
 836	case ICE_FC_FULL:
 837		fc = "Rx/Tx";
 838		break;
 839	case ICE_FC_TX_PAUSE:
 840		fc = "Tx";
 841		break;
 842	case ICE_FC_RX_PAUSE:
 843		fc = "Rx";
 844		break;
 845	case ICE_FC_NONE:
 846		fc = "None";
 847		break;
 848	default:
 849		fc = "Unknown";
 850		break;
 851	}
 852
 853	/* Get FEC mode based on negotiated link info */
 854	switch (vsi->port_info->phy.link_info.fec_info) {
 855	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
 856	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
 857		fec = "RS-FEC";
 858		break;
 859	case ICE_AQ_LINK_25G_KR_FEC_EN:
 860		fec = "FC-FEC/BASE-R";
 861		break;
 862	default:
 863		fec = "NONE";
 864		break;
 865	}
 866
  867	/* check if autoneg completed; may be false if autoneg is not supported */
 868	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
 869		an = "True";
 870	else
 871		an = "False";
 872
 873	/* Get FEC mode requested based on PHY caps last SW configuration */
 874	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 875	if (!caps) {
 876		fec_req = "Unknown";
 877		an_advertised = "Unknown";
 878		goto done;
 879	}
 880
 881	status = ice_aq_get_phy_caps(vsi->port_info, false,
 882				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
 883	if (status)
 884		netdev_info(vsi->netdev, "Get phy capability failed.\n");
 885
 886	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
 887
 888	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
 889	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
 890		fec_req = "RS-FEC";
 891	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
 892		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
 893		fec_req = "FC-FEC/BASE-R";
 894	else
 895		fec_req = "NONE";
 896
 897	kfree(caps);
 898
 899done:
 900	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
 901		    speed, fec_req, fec, an_advertised, an, fc);
 902	ice_print_topo_conflict(vsi);
 903}
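
/* Illustrative console output from the netdev_info() above (the PCI address
 * and netdev name are hypothetical):
 *
 *	ice 0000:3b:00.0 eth0: NIC Link is up 100 Gbps Full Duplex, Requested
 *	FEC: RS-FEC, Negotiated FEC: RS-FEC, Autoneg Advertised: On, Autoneg
 *	Negotiated: True, Flow Control: None
 */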
 904
 905/**
 906 * ice_vsi_link_event - update the VSI's netdev
 907 * @vsi: the VSI on which the link event occurred
  908 * @link_up: true if the link is now up, false if it is down
 909 */
 910static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
 911{
 912	if (!vsi)
 913		return;
 914
 915	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
 916		return;
 917
 918	if (vsi->type == ICE_VSI_PF) {
 919		if (link_up == netif_carrier_ok(vsi->netdev))
 920			return;
 921
 922		if (link_up) {
 923			netif_carrier_on(vsi->netdev);
 924			netif_tx_wake_all_queues(vsi->netdev);
 925		} else {
 926			netif_carrier_off(vsi->netdev);
 927			netif_tx_stop_all_queues(vsi->netdev);
 928		}
 929	}
 930}
 931
 932/**
 933 * ice_set_dflt_mib - send a default config MIB to the FW
 934 * @pf: private PF struct
 935 *
 936 * This function sends a default configuration MIB to the FW.
 937 *
 938 * If this function errors out at any point, the driver is still able to
 939 * function.  The main impact is that LFC may not operate as expected.
  940 * Therefore an error state in this function should be logged with a DBG
  941 * message, and the driver rebuild/reenable should continue.
 942 */
 943static void ice_set_dflt_mib(struct ice_pf *pf)
 944{
 945	struct device *dev = ice_pf_to_dev(pf);
 946	u8 mib_type, *buf, *lldpmib = NULL;
 947	u16 len, typelen, offset = 0;
 948	struct ice_lldp_org_tlv *tlv;
 949	struct ice_hw *hw = &pf->hw;
 950	u32 ouisubtype;
 951
 952	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
 953	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
 954	if (!lldpmib) {
 955		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
 956			__func__);
 957		return;
 958	}
 959
 960	/* Add ETS CFG TLV */
 961	tlv = (struct ice_lldp_org_tlv *)lldpmib;
 962	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
 963		   ICE_IEEE_ETS_TLV_LEN);
 964	tlv->typelen = htons(typelen);
 965	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 966		      ICE_IEEE_SUBTYPE_ETS_CFG);
 967	tlv->ouisubtype = htonl(ouisubtype);
 968
 969	buf = tlv->tlvinfo;
 970	buf[0] = 0;
 971
 972	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
 973	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
 974	 * Octets 13 - 20 are TSA values - leave as zeros
 975	 */
 976	buf[5] = 0x64;
 977	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
 978	offset += len + 2;
 979	tlv = (struct ice_lldp_org_tlv *)
 980		((char *)tlv + sizeof(tlv->typelen) + len);
 981
 982	/* Add ETS REC TLV */
 983	buf = tlv->tlvinfo;
 984	tlv->typelen = htons(typelen);
 985
 986	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 987		      ICE_IEEE_SUBTYPE_ETS_REC);
 988	tlv->ouisubtype = htonl(ouisubtype);
 989
 990	/* First octet of buf is reserved
 991	 * Octets 1 - 4 map UP to TC - all UPs map to zero
 992	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
 993	 * Octets 13 - 20 are TSA value - leave as zeros
 994	 */
 995	buf[5] = 0x64;
 996	offset += len + 2;
 997	tlv = (struct ice_lldp_org_tlv *)
 998		((char *)tlv + sizeof(tlv->typelen) + len);
 999
1000	/* Add PFC CFG TLV */
1001	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1002		   ICE_IEEE_PFC_TLV_LEN);
1003	tlv->typelen = htons(typelen);
1004
1005	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1006		      ICE_IEEE_SUBTYPE_PFC_CFG);
1007	tlv->ouisubtype = htonl(ouisubtype);
1008
1009	/* Octet 1 left as all zeros - PFC disabled */
1010	buf[0] = 0x08;
1011	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
1012	offset += len + 2;
1013
1014	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1015		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1016
1017	kfree(lldpmib);
1018}
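
/* TLV header arithmetic used above (editor's sketch, assuming the usual
 * IEEE 802.1Qaz values ICE_TLV_TYPE_ORG = 127 and ICE_IEEE_ETS_TLV_LEN = 25):
 *
 *	typelen = (127 << 9) | 25;	(7-bit type, 9-bit length)
 *
 * Each TLV then advances 'offset' by len + 2: the payload length plus the
 * 2-byte type/length header.
 */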
1019
1020/**
1021 * ice_check_phy_fw_load - check if PHY FW load failed
1022 * @pf: pointer to PF struct
1023 * @link_cfg_err: bitmap from the link info structure
1024 *
1025 * check if external PHY FW load failed and print an error message if it did
1026 */
1027static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1028{
1029	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1030		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1031		return;
1032	}
1033
1034	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1035		return;
1036
1037	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1038		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1039		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1040	}
1041}
1042
1043/**
 1044 * ice_check_module_power - check module power level
1045 * @pf: pointer to PF struct
1046 * @link_cfg_err: bitmap from the link info structure
1047 *
1048 * check module power level returned by a previous call to aq_get_link_info
1049 * and print error messages if module power level is not supported
1050 */
1051static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1052{
1053	/* if module power level is supported, clear the flag */
1054	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1055			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1056		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1057		return;
1058	}
1059
1060	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1061	 * above block didn't clear this bit, there's nothing to do
1062	 */
1063	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1064		return;
1065
1066	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1067		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1068		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1069	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1070		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1071		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1072	}
1073}
1074
1075/**
1076 * ice_check_link_cfg_err - check if link configuration failed
1077 * @pf: pointer to the PF struct
1078 * @link_cfg_err: bitmap from the link info structure
1079 *
1080 * print if any link configuration failure happens due to the value in the
1081 * link_cfg_err parameter in the link info structure
1082 */
1083static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1084{
1085	ice_check_module_power(pf, link_cfg_err);
1086	ice_check_phy_fw_load(pf, link_cfg_err);
1087}
1088
1089/**
1090 * ice_link_event - process the link event
1091 * @pf: PF that the link event is associated with
1092 * @pi: port_info for the port that the link event is associated with
1093 * @link_up: true if the physical link is up and false if it is down
1094 * @link_speed: current link speed received from the link event
1095 *
1096 * Returns 0 on success and negative on failure
1097 */
1098static int
1099ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1100	       u16 link_speed)
1101{
1102	struct device *dev = ice_pf_to_dev(pf);
1103	struct ice_phy_info *phy_info;
1104	struct ice_vsi *vsi;
1105	u16 old_link_speed;
1106	bool old_link;
1107	int status;
1108
1109	phy_info = &pi->phy;
1110	phy_info->link_info_old = phy_info->link_info;
1111
1112	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1113	old_link_speed = phy_info->link_info_old.link_speed;
1114
 1115	/* update the link info structures and re-enable link events;
 1116	 * don't bail on failure since other bookkeeping is still needed
1117	 */
1118	status = ice_update_link_info(pi);
1119	if (status)
1120		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1121			pi->lport, status,
1122			ice_aq_str(pi->hw->adminq.sq_last_status));
1123
1124	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1125
1126	/* Check if the link state is up after updating link info, and treat
1127	 * this event as an UP event since the link is actually UP now.
1128	 */
1129	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1130		link_up = true;
1131
1132	vsi = ice_get_main_vsi(pf);
1133	if (!vsi || !vsi->port_info)
1134		return -EINVAL;
1135
1136	/* turn off PHY if media was removed */
1137	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1138	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1139		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1140		ice_set_link(vsi, false);
1141	}
1142
 1143	/* if the old link state and speed are the same as the new */
1144	if (link_up == old_link && link_speed == old_link_speed)
1145		return 0;
1146
1147	ice_ptp_link_change(pf, link_up);
1148
1149	if (ice_is_dcb_active(pf)) {
1150		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1151			ice_dcb_rebuild(pf);
1152	} else {
1153		if (link_up)
1154			ice_set_dflt_mib(pf);
1155	}
1156	ice_vsi_link_event(vsi, link_up);
1157	ice_print_link_msg(vsi, link_up);
1158
1159	ice_vc_notify_link_state(pf);
1160
1161	return 0;
1162}
1163
1164/**
1165 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1166 * @pf: board private structure
1167 */
1168static void ice_watchdog_subtask(struct ice_pf *pf)
1169{
1170	int i;
1171
1172	/* if interface is down do nothing */
1173	if (test_bit(ICE_DOWN, pf->state) ||
1174	    test_bit(ICE_CFG_BUSY, pf->state))
1175		return;
1176
1177	/* make sure we don't do these things too often */
1178	if (time_before(jiffies,
1179			pf->serv_tmr_prev + pf->serv_tmr_period))
1180		return;
1181
1182	pf->serv_tmr_prev = jiffies;
1183
1184	/* Update the stats for active netdevs so the network stack
1185	 * can look at updated numbers whenever it cares to
1186	 */
1187	ice_update_pf_stats(pf);
1188	ice_for_each_vsi(pf, i)
1189		if (pf->vsi[i] && pf->vsi[i]->netdev)
1190			ice_update_vsi_stats(pf->vsi[i]);
1191}
1192
1193/**
1194 * ice_init_link_events - enable/initialize link events
1195 * @pi: pointer to the port_info instance
1196 *
1197 * Returns -EIO on failure, 0 on success
1198 */
1199static int ice_init_link_events(struct ice_port_info *pi)
1200{
1201	u16 mask;
1202
1203	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1204		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1205		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1206
1207	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1208		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1209			pi->lport);
1210		return -EIO;
1211	}
1212
1213	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1214		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1215			pi->lport);
1216		return -EIO;
1217	}
1218
1219	return 0;
1220}
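
/* Note on the mask above: ice_aq_set_event_mask() suppresses the events
 * whose bits are set, so inverting the OR of the wanted bits leaves exactly
 * the UPDOWN, MEDIA_NA, MODULE_QUAL_FAIL and PHY_FW_LOAD_FAIL events
 * enabled and masks everything else.
 */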
1221
1222/**
1223 * ice_handle_link_event - handle link event via ARQ
1224 * @pf: PF that the link event is associated with
1225 * @event: event structure containing link status info
1226 */
1227static int
1228ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1229{
1230	struct ice_aqc_get_link_status_data *link_data;
1231	struct ice_port_info *port_info;
1232	int status;
1233
1234	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1235	port_info = pf->hw.port_info;
1236	if (!port_info)
1237		return -EINVAL;
1238
1239	status = ice_link_event(pf, port_info,
1240				!!(link_data->link_info & ICE_AQ_LINK_UP),
1241				le16_to_cpu(link_data->link_speed));
1242	if (status)
1243		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1244			status);
1245
1246	return status;
1247}
1248
1249/**
1250 * ice_get_fwlog_data - copy the FW log data from ARQ event
1251 * @pf: PF that the FW log event is associated with
1252 * @event: event structure containing FW log data
1253 */
1254static void
1255ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
1256{
1257	struct ice_fwlog_data *fwlog;
1258	struct ice_hw *hw = &pf->hw;
1259
1260	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
1261
1262	memset(fwlog->data, 0, PAGE_SIZE);
1263	fwlog->data_size = le16_to_cpu(event->desc.datalen);
1264
1265	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
1266	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
1267
1268	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
1269		/* the rings are full so bump the head to create room */
1270		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
1271					 hw->fwlog_ring.size);
1272	}
1273}
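
/* Ring bookkeeping sketch: 'tail' is the producer index, bumped for every
 * received log buffer; once the ring is full, 'head' is bumped as well, so
 * the oldest entry is overwritten instead of stalling the ARQ.
 */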
1274
1275/**
1276 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
1277 * @pf: pointer to the PF private structure
1278 * @task: intermediate helper storage and identifier for waiting
1279 * @opcode: the opcode to wait for
1280 *
1281 * Prepares to wait for a specific AdminQ completion event on the ARQ for
1282 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
1283 *
1284 * Calls are separated to allow caller registering for event before sending
1285 * the command, which mitigates a race between registering and FW responding.
1286 *
 1287 * To obtain only the descriptor contents, pass a task->event with a NULL
1288 * msg_buf. If the complete data buffer is desired, allocate the
1289 * task->event.msg_buf with enough space ahead of time.
1290 */
1291void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1292			   u16 opcode)
1293{
1294	INIT_HLIST_NODE(&task->entry);
1295	task->opcode = opcode;
1296	task->state = ICE_AQ_TASK_WAITING;
1297
1298	spin_lock_bh(&pf->aq_wait_lock);
1299	hlist_add_head(&task->entry, &pf->aq_wait_list);
1300	spin_unlock_bh(&pf->aq_wait_lock);
1301}
1302
1303/**
1304 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1305 * @pf: pointer to the PF private structure
1306 * @task: ptr prepared by ice_aq_prep_for_event()
1307 * @timeout: how long to wait, in jiffies
1308 *
1309 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1310 * current thread will be put to sleep until the specified event occurs or
1311 * until the given timeout is reached.
1312 *
1313 * Returns: zero on success, or a negative error code on failure.
1314 */
1315int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1316			  unsigned long timeout)
1317{
1318	enum ice_aq_task_state *state = &task->state;
1319	struct device *dev = ice_pf_to_dev(pf);
1320	unsigned long start = jiffies;
1321	long ret;
1322	int err;
1323
1324	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
1325					       *state != ICE_AQ_TASK_WAITING,
1326					       timeout);
1327	switch (*state) {
1328	case ICE_AQ_TASK_NOT_PREPARED:
1329		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
1330		err = -EINVAL;
1331		break;
1332	case ICE_AQ_TASK_WAITING:
1333		err = ret < 0 ? ret : -ETIMEDOUT;
1334		break;
1335	case ICE_AQ_TASK_CANCELED:
1336		err = ret < 0 ? ret : -ECANCELED;
1337		break;
1338	case ICE_AQ_TASK_COMPLETE:
1339		err = ret < 0 ? ret : 0;
1340		break;
1341	default:
1342		WARN(1, "Unexpected AdminQ wait task state %u", *state);
1343		err = -EINVAL;
1344		break;
1345	}
1346
1347	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1348		jiffies_to_msecs(jiffies - start),
1349		jiffies_to_msecs(timeout),
1350		task->opcode);
1351
1352	spin_lock_bh(&pf->aq_wait_lock);
1353	hlist_del(&task->entry);
1354	spin_unlock_bh(&pf->aq_wait_lock);
1355
1356	return err;
1357}
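
/* Usage sketch (hypothetical caller, opcode chosen for illustration, error
 * handling elided) - register for the completion before sending the command
 * so the response cannot race past the registration:
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);
 *	... send the AdminQ command with the same opcode here ...
 *	err = ice_aq_wait_for_event(pf, &task, 2 * HZ);
 *	if (err)
 *		... -ETIMEDOUT, -ECANCELED, or interrupted by a signal ...
 */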
1358
1359/**
1360 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1361 * @pf: pointer to the PF private structure
1362 * @opcode: the opcode of the event
1363 * @event: the event to check
1364 *
1365 * Loops over the current list of pending threads waiting for an AdminQ event.
1366 * For each matching task, copy the contents of the event into the task
1367 * structure and wake up the thread.
1368 *
1369 * If multiple threads wait for the same opcode, they will all be woken up.
1370 *
1371 * Note that event->msg_buf will only be duplicated if the event has a buffer
1372 * with enough space already allocated. Otherwise, only the descriptor and
1373 * message length will be copied.
1374 *
 1375 * The wait queue is woken only if at least one matching task was found.
1376 */
1377static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1378				struct ice_rq_event_info *event)
1379{
1380	struct ice_rq_event_info *task_ev;
1381	struct ice_aq_task *task;
1382	bool found = false;
1383
1384	spin_lock_bh(&pf->aq_wait_lock);
1385	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1386		if (task->state != ICE_AQ_TASK_WAITING)
1387			continue;
1388		if (task->opcode != opcode)
1389			continue;
1390
1391		task_ev = &task->event;
1392		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
1393		task_ev->msg_len = event->msg_len;
1394
1395		/* Only copy the data buffer if a destination was set */
1396		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
1397			memcpy(task_ev->msg_buf, event->msg_buf,
1398			       event->buf_len);
1399			task_ev->buf_len = event->buf_len;
1400		}
1401
1402		task->state = ICE_AQ_TASK_COMPLETE;
1403		found = true;
1404	}
1405	spin_unlock_bh(&pf->aq_wait_lock);
1406
1407	if (found)
1408		wake_up(&pf->aq_wait_queue);
1409}
1410
1411/**
1412 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1413 * @pf: the PF private structure
1414 *
1415 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1416 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1417 */
1418static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1419{
1420	struct ice_aq_task *task;
1421
1422	spin_lock_bh(&pf->aq_wait_lock);
1423	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1424		task->state = ICE_AQ_TASK_CANCELED;
1425	spin_unlock_bh(&pf->aq_wait_lock);
1426
1427	wake_up(&pf->aq_wait_queue);
1428}
1429
1430#define ICE_MBX_OVERFLOW_WATERMARK 64
1431
1432/**
1433 * __ice_clean_ctrlq - helper function to clean controlq rings
1434 * @pf: ptr to struct ice_pf
1435 * @q_type: specific Control queue type
1436 */
1437static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1438{
1439	struct device *dev = ice_pf_to_dev(pf);
1440	struct ice_rq_event_info event;
1441	struct ice_hw *hw = &pf->hw;
1442	struct ice_ctl_q_info *cq;
1443	u16 pending, i = 0;
1444	const char *qtype;
1445	u32 oldval, val;
1446
1447	/* Do not clean control queue if/when PF reset fails */
1448	if (test_bit(ICE_RESET_FAILED, pf->state))
1449		return 0;
1450
1451	switch (q_type) {
1452	case ICE_CTL_Q_ADMIN:
1453		cq = &hw->adminq;
1454		qtype = "Admin";
1455		break;
1456	case ICE_CTL_Q_SB:
1457		cq = &hw->sbq;
1458		qtype = "Sideband";
1459		break;
1460	case ICE_CTL_Q_MAILBOX:
1461		cq = &hw->mailboxq;
1462		qtype = "Mailbox";
1463		/* we are going to try to detect a malicious VF, so set the
1464		 * state to begin detection
1465		 */
1466		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1467		break;
1468	default:
1469		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1470		return 0;
1471	}
1472
 1473	/* check for error indications - PF_xx_AxQLEN register layouts for
 1474	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1475	 */
1476	val = rd32(hw, cq->rq.len);
1477	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1478		   PF_FW_ARQLEN_ARQCRIT_M)) {
1479		oldval = val;
1480		if (val & PF_FW_ARQLEN_ARQVFE_M)
1481			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1482				qtype);
1483		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1484			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1485				qtype);
1486		}
1487		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1488			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1489				qtype);
1490		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1491			 PF_FW_ARQLEN_ARQCRIT_M);
1492		if (oldval != val)
1493			wr32(hw, cq->rq.len, val);
1494	}
1495
1496	val = rd32(hw, cq->sq.len);
1497	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1498		   PF_FW_ATQLEN_ATQCRIT_M)) {
1499		oldval = val;
1500		if (val & PF_FW_ATQLEN_ATQVFE_M)
1501			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1502				qtype);
1503		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1504			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1505				qtype);
1506		}
1507		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1508			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1509				qtype);
1510		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1511			 PF_FW_ATQLEN_ATQCRIT_M);
1512		if (oldval != val)
1513			wr32(hw, cq->sq.len, val);
1514	}
1515
1516	event.buf_len = cq->rq_buf_size;
1517	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1518	if (!event.msg_buf)
1519		return 0;
1520
1521	do {
1522		struct ice_mbx_data data = {};
1523		u16 opcode;
1524		int ret;
1525
1526		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1527		if (ret == -EALREADY)
1528			break;
1529		if (ret) {
1530			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1531				ret);
1532			break;
1533		}
1534
1535		opcode = le16_to_cpu(event.desc.opcode);
1536
1537		/* Notify any thread that might be waiting for this event */
1538		ice_aq_check_events(pf, opcode, &event);
1539
1540		switch (opcode) {
1541		case ice_aqc_opc_get_link_status:
1542			if (ice_handle_link_event(pf, &event))
1543				dev_err(dev, "Could not handle link event\n");
1544			break;
1545		case ice_aqc_opc_event_lan_overflow:
1546			ice_vf_lan_overflow_event(pf, &event);
1547			break;
1548		case ice_mbx_opc_send_msg_to_pf:
1549			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
1550				ice_vc_process_vf_msg(pf, &event, NULL);
1551				ice_mbx_vf_dec_trig_e830(hw, &event);
1552			} else {
1553				u16 val = hw->mailboxq.num_rq_entries;
1554
1555				data.max_num_msgs_mbx = val;
1556				val = ICE_MBX_OVERFLOW_WATERMARK;
1557				data.async_watermark_val = val;
1558				data.num_msg_proc = i;
1559				data.num_pending_arq = pending;
1560
1561				ice_vc_process_vf_msg(pf, &event, &data);
1562			}
1563			break;
1564		case ice_aqc_opc_fw_logs_event:
1565			ice_get_fwlog_data(pf, &event);
1566			break;
1567		case ice_aqc_opc_lldp_set_mib_change:
1568			ice_dcb_process_lldp_set_mib_change(pf, &event);
1569			break;
1570		default:
1571			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1572				qtype, opcode);
1573			break;
1574		}
1575	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1576
1577	kfree(event.msg_buf);
1578
1579	return pending && (i == ICE_DFLT_IRQ_WORK);
1580}
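
/* The return value above is a "more work left" hint: it is non-zero only
 * when the loop stopped because it exhausted the ICE_DFLT_IRQ_WORK budget
 * while events were still pending, which makes the caller leave the
 * *_EVENT_PENDING bit set and come back later.
 */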
1581
1582/**
1583 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1584 * @hw: pointer to hardware info
1585 * @cq: control queue information
1586 *
1587 * returns true if there are pending messages in a queue, false if there aren't
1588 */
1589static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1590{
1591	u16 ntu;
1592
1593	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1594	return cq->rq.next_to_clean != ntu;
1595}
1596
1597/**
1598 * ice_clean_adminq_subtask - clean the AdminQ rings
1599 * @pf: board private structure
1600 */
1601static void ice_clean_adminq_subtask(struct ice_pf *pf)
1602{
1603	struct ice_hw *hw = &pf->hw;
1604
1605	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1606		return;
1607
1608	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1609		return;
1610
1611	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1612
1613	/* There might be a situation where new messages arrive to a control
1614	 * queue between processing the last message and clearing the
1615	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1616	 * ice_ctrlq_pending) and process new messages if any.
1617	 */
1618	if (ice_ctrlq_pending(hw, &hw->adminq))
1619		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1620
1621	ice_flush(hw);
1622}
1623
1624/**
1625 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1626 * @pf: board private structure
1627 */
1628static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1629{
1630	struct ice_hw *hw = &pf->hw;
1631
1632	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1633		return;
1634
1635	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1636		return;
1637
1638	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1639
1640	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1641		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1642
1643	ice_flush(hw);
1644}
1645
1646/**
1647 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1648 * @pf: board private structure
1649 */
1650static void ice_clean_sbq_subtask(struct ice_pf *pf)
1651{
1652	struct ice_hw *hw = &pf->hw;
1653
1654	/* if mac_type is not generic, sideband is not supported
1655	 * and there's nothing to do here
1656	 */
1657	if (!ice_is_generic_mac(hw)) {
1658		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1659		return;
1660	}
1661
1662	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1663		return;
1664
1665	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1666		return;
1667
1668	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1669
1670	if (ice_ctrlq_pending(hw, &hw->sbq))
1671		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1672
1673	ice_flush(hw);
1674}
1675
1676/**
1677 * ice_service_task_schedule - schedule the service task to wake up
1678 * @pf: board private structure
1679 *
1680 * If not already scheduled, this puts the task into the work queue.
1681 */
1682void ice_service_task_schedule(struct ice_pf *pf)
1683{
1684	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1685	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1686	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1687		queue_work(ice_wq, &pf->serv_task);
1688}
1689
1690/**
1691 * ice_service_task_complete - finish up the service task
1692 * @pf: board private structure
1693 */
1694static void ice_service_task_complete(struct ice_pf *pf)
1695{
1696	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1697
1698	/* force memory (pf->state) to sync before next service task */
1699	smp_mb__before_atomic();
1700	clear_bit(ICE_SERVICE_SCHED, pf->state);
1701}
1702
1703/**
1704 * ice_service_task_stop - stop service task and cancel works
1705 * @pf: board private structure
1706 *
1707 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1708 * 1 otherwise.
1709 */
1710static int ice_service_task_stop(struct ice_pf *pf)
1711{
1712	int ret;
1713
1714	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1715
1716	if (pf->serv_tmr.function)
1717		del_timer_sync(&pf->serv_tmr);
1718	if (pf->serv_task.func)
1719		cancel_work_sync(&pf->serv_task);
1720
1721	clear_bit(ICE_SERVICE_SCHED, pf->state);
1722	return ret;
1723}
1724
1725/**
1726 * ice_service_task_restart - restart service task and schedule works
1727 * @pf: board private structure
1728 *
 1729 * This function is needed for the suspend and resume flows (e.g. WoL scenario)
1730 */
1731static void ice_service_task_restart(struct ice_pf *pf)
1732{
1733	clear_bit(ICE_SERVICE_DIS, pf->state);
1734	ice_service_task_schedule(pf);
1735}
1736
1737/**
1738 * ice_service_timer - timer callback to schedule service task
1739 * @t: pointer to timer_list
1740 */
1741static void ice_service_timer(struct timer_list *t)
1742{
1743	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1744
1745	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1746	ice_service_task_schedule(pf);
1747}
1748
1749/**
1750 * ice_mdd_maybe_reset_vf - reset VF after MDD event
1751 * @pf: pointer to the PF structure
1752 * @vf: pointer to the VF structure
1753 * @reset_vf_tx: whether Tx MDD has occurred
1754 * @reset_vf_rx: whether Rx MDD has occurred
1755 *
1756 * Since the queue can get stuck on VF MDD events, the PF can be configured to
1757 * automatically reset the VF by enabling the private ethtool flag
1758 * mdd-auto-reset-vf.
1759 */
1760static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1761				   bool reset_vf_tx, bool reset_vf_rx)
1762{
1763	struct device *dev = ice_pf_to_dev(pf);
1764
1765	if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1766		return;
1767
1768	/* VF MDD event counters will be cleared by reset, so print the event
1769	 * prior to reset.
1770	 */
1771	if (reset_vf_tx)
1772		ice_print_vf_tx_mdd_event(vf);
1773
1774	if (reset_vf_rx)
1775		ice_print_vf_rx_mdd_event(vf);
1776
1777	dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1778		 pf->hw.pf_id, vf->vf_id);
1779	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1780}
1781
1782/**
1783 * ice_handle_mdd_event - handle malicious driver detect event
1784 * @pf: pointer to the PF structure
1785 *
1786 * Called from service task. OICR interrupt handler indicates MDD event.
1787 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1788 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1789 * disable the queue, the PF can be configured to reset the VF using ethtool
1790 * private flag mdd-auto-reset-vf.
1791 */
1792static void ice_handle_mdd_event(struct ice_pf *pf)
1793{
1794	struct device *dev = ice_pf_to_dev(pf);
1795	struct ice_hw *hw = &pf->hw;
1796	struct ice_vf *vf;
1797	unsigned int bkt;
1798	u32 reg;
1799
1800	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1801		/* Since the VF MDD event logging is rate limited, check if
1802		 * there are pending MDD events.
1803		 */
1804		ice_print_vfs_mdd_events(pf);
1805		return;
1806	}
1807
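	/* Each GL_MDET_* register latches details of the first offending
	 * queue; writing all ones clears the VALID bit and re-arms
	 * detection for the next event.
	 */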
1808	/* find what triggered an MDD event */
1809	reg = rd32(hw, GL_MDET_TX_PQM);
1810	if (reg & GL_MDET_TX_PQM_VALID_M) {
1811		u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1812		u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1813		u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1814		u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1815
1816		if (netif_msg_tx_err(pf))
1817			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1818				 event, queue, pf_num, vf_num);
1819		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1820	}
1821
1822	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1823	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1824		u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1825		u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1826		u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1827		u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1828
1829		if (netif_msg_tx_err(pf))
1830			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1831				 event, queue, pf_num, vf_num);
1832		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1833	}
1834
1835	reg = rd32(hw, GL_MDET_RX);
1836	if (reg & GL_MDET_RX_VALID_M) {
1837		u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1838		u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1839		u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1840		u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1841
1842		if (netif_msg_rx_err(pf))
1843			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1844				 event, queue, pf_num, vf_num);
1845		wr32(hw, GL_MDET_RX, 0xffffffff);
1846	}
1847
1848	/* check to see if this PF caused an MDD event */
1849	reg = rd32(hw, PF_MDET_TX_PQM);
1850	if (reg & PF_MDET_TX_PQM_VALID_M) {
1851		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1852		if (netif_msg_tx_err(pf))
1853			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1854	}
1855
1856	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1857	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1858		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1859		if (netif_msg_tx_err(pf))
1860			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1861	}
1862
1863	reg = rd32(hw, PF_MDET_RX);
1864	if (reg & PF_MDET_RX_VALID_M) {
1865		wr32(hw, PF_MDET_RX, 0xFFFF);
1866		if (netif_msg_rx_err(pf))
1867			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1868	}
1869
1870	/* Check to see if one of the VFs caused an MDD event, and then
1871	 * increment counters and set print pending
1872	 */
1873	mutex_lock(&pf->vfs.table_lock);
1874	ice_for_each_vf(pf, bkt, vf) {
1875		bool reset_vf_tx = false, reset_vf_rx = false;
1876
1877		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1878		if (reg & VP_MDET_TX_PQM_VALID_M) {
1879			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1880			vf->mdd_tx_events.count++;
1881			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1882			if (netif_msg_tx_err(pf))
1883				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1884					 vf->vf_id);
1885
1886			reset_vf_tx = true;
1887		}
1888
1889		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1890		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1891			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1892			vf->mdd_tx_events.count++;
1893			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1894			if (netif_msg_tx_err(pf))
1895				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1896					 vf->vf_id);
1897
1898			reset_vf_tx = true;
1899		}
1900
1901		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1902		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1903			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1904			vf->mdd_tx_events.count++;
1905			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1906			if (netif_msg_tx_err(pf))
1907				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1908					 vf->vf_id);
1909
1910			reset_vf_tx = true;
1911		}
1912
1913		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1914		if (reg & VP_MDET_RX_VALID_M) {
1915			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1916			vf->mdd_rx_events.count++;
1917			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1918			if (netif_msg_rx_err(pf))
1919				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1920					 vf->vf_id);
1921
1922			reset_vf_rx = true;
1923		}
1924
1925		if (reset_vf_tx || reset_vf_rx)
1926			ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1927					       reset_vf_rx);
1928	}
1929	mutex_unlock(&pf->vfs.table_lock);
1930
1931	ice_print_vfs_mdd_events(pf);
1932}
1933
1934/**
1935 * ice_force_phys_link_state - Force the physical link state
1936 * @vsi: VSI to force the physical link state to up/down
1937 * @link_up: true/false indicates to set the physical link to up/down
1938 *
1939 * Force the physical link state by getting the current PHY capabilities from
1940 * hardware and setting the PHY config based on the determined capabilities. If
1941 * link changes, a link event will be triggered because both the Enable Automatic
1942 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1943 *
1944 * Returns 0 on success, negative on failure
1945 */
1946static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1947{
1948	struct ice_aqc_get_phy_caps_data *pcaps;
1949	struct ice_aqc_set_phy_cfg_data *cfg;
1950	struct ice_port_info *pi;
1951	struct device *dev;
1952	int retcode;
1953
1954	if (!vsi || !vsi->port_info || !vsi->back)
1955		return -EINVAL;
1956	if (vsi->type != ICE_VSI_PF)
1957		return 0;
1958
1959	dev = ice_pf_to_dev(vsi->back);
1960
1961	pi = vsi->port_info;
1962
1963	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1964	if (!pcaps)
1965		return -ENOMEM;
1966
1967	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1968				      NULL);
1969	if (retcode) {
1970		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1971			vsi->vsi_num, retcode);
1972		retcode = -EIO;
1973		goto out;
1974	}
1975
1976	/* No change in link */
1977	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1978	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1979		goto out;
1980
1981	/* Use the current user PHY configuration. The current user PHY
1982	 * configuration is initialized during probe from PHY capabilities
1983	 * software mode, and updated on set PHY configuration.
1984	 */
1985	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1986	if (!cfg) {
1987		retcode = -ENOMEM;
1988		goto out;
1989	}
1990
1991	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1992	if (link_up)
1993		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1994	else
1995		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1996
1997	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1998	if (retcode) {
1999		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2000			vsi->vsi_num, retcode);
2001		retcode = -EIO;
2002	}
2003
2004	kfree(cfg);
2005out:
2006	kfree(pcaps);
2007	return retcode;
2008}
2009
2010/**
2011 * ice_init_nvm_phy_type - Initialize the NVM PHY type
2012 * @pi: port info structure
2013 *
2014 * Initialize nvm_phy_type_[low|high] for link lenient mode support
2015 */
2016static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2017{
2018	struct ice_aqc_get_phy_caps_data *pcaps;
2019	struct ice_pf *pf = pi->hw->back;
2020	int err;
2021
2022	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2023	if (!pcaps)
2024		return -ENOMEM;
2025
2026	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2027				  pcaps, NULL);
2028
2029	if (err) {
2030		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2031		goto out;
2032	}
2033
2034	pf->nvm_phy_type_hi = pcaps->phy_type_high;
2035	pf->nvm_phy_type_lo = pcaps->phy_type_low;
2036
2037out:
2038	kfree(pcaps);
2039	return err;
2040}
2041
2042/**
2043 * ice_init_link_dflt_override - Initialize link default override
2044 * @pi: port info structure
2045 *
2046 * Initialize link default override and PHY total port shutdown during probe
2047 */
2048static void ice_init_link_dflt_override(struct ice_port_info *pi)
2049{
2050	struct ice_link_default_override_tlv *ldo;
2051	struct ice_pf *pf = pi->hw->back;
2052
2053	ldo = &pf->link_dflt_override;
2054	if (ice_get_link_default_override(ldo, pi))
2055		return;
2056
2057	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2058		return;
2059
2060	/* Enable Total Port Shutdown (override/replace link-down-on-close
2061	 * ethtool private flag) for ports with Port Disable bit set.
2062	 */
2063	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2064	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2065}
2066
2067/**
2068 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2069 * @pi: port info structure
2070 *
2071 * If default override is enabled, initialize the user PHY cfg speed and FEC
2072 * settings using the default override mask from the NVM.
2073 *
2074 * The PHY should only be configured with the default override settings the
2075 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2076 * is used to indicate that the user PHY cfg default override is initialized
2077 * and the PHY has not been configured with the default override settings. The
2078 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2079 * configured.
2080 *
2081 * This function should be called only if the FW doesn't support default
2082 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2083 */
2084static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2085{
2086	struct ice_link_default_override_tlv *ldo;
2087	struct ice_aqc_set_phy_cfg_data *cfg;
2088	struct ice_phy_info *phy = &pi->phy;
2089	struct ice_pf *pf = pi->hw->back;
2090
2091	ldo = &pf->link_dflt_override;
2092
2093	/* If link default override is enabled, use it to mask the NVM PHY
2094	 * capabilities for the speed and FEC default configuration.
2095	 */
2096	cfg = &phy->curr_user_phy_cfg;
2097
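	/* Mask the NVM PHY types with the override TLV only when the TLV
	 * actually supplies them; otherwise keep the NVM defaults.
	 */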
2098	if (ldo->phy_type_low || ldo->phy_type_high) {
2099		cfg->phy_type_low = pf->nvm_phy_type_lo &
2100				    cpu_to_le64(ldo->phy_type_low);
2101		cfg->phy_type_high = pf->nvm_phy_type_hi &
2102				     cpu_to_le64(ldo->phy_type_high);
2103	}
2104	cfg->link_fec_opt = ldo->fec_options;
2105	phy->curr_user_fec_req = ICE_FEC_AUTO;
2106
2107	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2108}
2109
2110/**
2111 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2112 * @pi: port info structure
2113 *
2114 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2115 * mode to default. The PHY defaults come from get PHY capabilities topology
2116 * with media, so call this when media is first available. An error is
2117 * returned if called when media is not available. The PHY initialization
2118 * completed state is set here.
2119 *
2120 * These configurations are used when setting the PHY configuration and are
2121 * updated each time the PHY configuration is set.
2122 * Returns 0 on success, negative on failure.
2123 */
2124static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2125{
2126	struct ice_aqc_get_phy_caps_data *pcaps;
2127	struct ice_phy_info *phy = &pi->phy;
2128	struct ice_pf *pf = pi->hw->back;
2129	int err;
2130
2131	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2132		return -EIO;
2133
2134	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2135	if (!pcaps)
2136		return -ENOMEM;
2137
2138	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2139		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2140					  pcaps, NULL);
2141	else
2142		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2143					  pcaps, NULL);
2144	if (err) {
2145		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2146		goto err_out;
2147	}
2148
2149	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2150
2151	/* check if lenient mode is supported and enabled */
2152	if (ice_fw_supports_link_override(pi->hw) &&
2153	    !(pcaps->module_compliance_enforcement &
2154	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2155		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2156
2157		/* if the FW supports default PHY configuration mode, then the driver
2158		 * does not have to apply link override settings. If not,
2159		 * initialize user PHY configuration with link override values
2160		 */
2161		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2162		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2163			ice_init_phy_cfg_dflt_override(pi);
2164			goto out;
2165		}
2166	}
2167
2168	/* if link default override is not enabled, set user flow control and
2169	 * FEC settings based on what get_phy_caps returned
2170	 */
2171	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2172						      pcaps->link_fec_options);
2173	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2174
2175out:
2176	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2177	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2178err_out:
2179	kfree(pcaps);
2180	return err;
2181}
2182
2183/**
2184 * ice_configure_phy - configure PHY
2185 * @vsi: VSI of PHY
2186 *
2187 * Set the PHY configuration. If the current PHY configuration is the same as
2188 * the curr_user_phy_cfg, then do nothing to avoid a link flap. Otherwise,
2189 * configure the PHY based on the get PHY capabilities for topology with media.
2190 */
2191static int ice_configure_phy(struct ice_vsi *vsi)
2192{
2193	struct device *dev = ice_pf_to_dev(vsi->back);
2194	struct ice_port_info *pi = vsi->port_info;
2195	struct ice_aqc_get_phy_caps_data *pcaps;
2196	struct ice_aqc_set_phy_cfg_data *cfg;
2197	struct ice_phy_info *phy = &pi->phy;
2198	struct ice_pf *pf = vsi->back;
2199	int err;
2200
2201	/* Ensure we have media as we cannot configure a medialess port */
2202	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2203		return -ENOMEDIUM;
2204
2205	ice_print_topo_conflict(vsi);
2206
2207	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2208	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2209		return -EPERM;
2210
2211	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2212		return ice_force_phys_link_state(vsi, true);
2213
2214	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2215	if (!pcaps)
2216		return -ENOMEM;
2217
2218	/* Get current PHY config */
2219	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2220				  NULL);
2221	if (err) {
2222		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2223			vsi->vsi_num, err);
2224		goto done;
2225	}
2226
2227	/* If PHY enable link is configured and configuration has not changed,
2228	 * there's nothing to do
2229	 */
2230	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2231	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2232		goto done;
2233
2234	/* Use PHY topology as baseline for configuration */
2235	memset(pcaps, 0, sizeof(*pcaps));
2236	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2237		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2238					  pcaps, NULL);
2239	else
2240		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2241					  pcaps, NULL);
2242	if (err) {
2243		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2244			vsi->vsi_num, err);
2245		goto done;
2246	}
2247
2248	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2249	if (!cfg) {
2250		err = -ENOMEM;
2251		goto done;
2252	}
2253
2254	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2255
2256	/* Speed - If default override pending, use curr_user_phy_cfg set in
2257	 * ice_init_phy_cfg_dflt_override.
2258	 */
2259	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2260			       vsi->back->state)) {
2261		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2262		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2263	} else {
2264		u64 phy_low = 0, phy_high = 0;
2265
2266		ice_update_phy_type(&phy_low, &phy_high,
2267				    pi->phy.curr_user_speed_req);
2268		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2269		cfg->phy_type_high = pcaps->phy_type_high &
2270				     cpu_to_le64(phy_high);
2271	}
2272
2273	/* Can't provide what was requested; use PHY capabilities */
2274	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2275		cfg->phy_type_low = pcaps->phy_type_low;
2276		cfg->phy_type_high = pcaps->phy_type_high;
2277	}
2278
2279	/* FEC */
2280	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2281
2282	/* Can't provide what was requested; use PHY capabilities */
2283	if (cfg->link_fec_opt !=
2284	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2285		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2286		cfg->link_fec_opt = pcaps->link_fec_options;
2287	}
2288
2289	/* Flow Control - always supported; no need to check against
2290	 * capabilities
2291	 */
2292	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2293
2294	/* Enable link and link update */
2295	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2296
2297	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2298	if (err)
2299		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2300			vsi->vsi_num, err);
2301
2302	kfree(cfg);
2303done:
2304	kfree(pcaps);
2305	return err;
2306}
2307
2308/**
2309 * ice_check_media_subtask - Check for media
2310 * @pf: pointer to PF struct
2311 *
2312 * If media is available, initialize the PHY user configuration if that has
2313 * not been done yet, and configure the PHY if the interface is up.
2314 */
2315static void ice_check_media_subtask(struct ice_pf *pf)
2316{
2317	struct ice_port_info *pi;
2318	struct ice_vsi *vsi;
2319	int err;
2320
2321	/* No need to check for media if it's already present */
2322	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2323		return;
2324
2325	vsi = ice_get_main_vsi(pf);
2326	if (!vsi)
2327		return;
2328
2329	/* Refresh link info and check if media is present */
2330	pi = vsi->port_info;
2331	err = ice_update_link_info(pi);
2332	if (err)
2333		return;
2334
2335	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2336
2337	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2338		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2339			ice_init_phy_user_cfg(pi);
2340
2341		/* PHY settings are reset on media insertion, reconfigure
2342		 * PHY to preserve settings.
2343		 */
2344		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2345		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2346			return;
2347
2348		err = ice_configure_phy(vsi);
2349		if (!err)
2350			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2351
2352		/* A Link Status Event will be generated; the event handler
2353		 * will complete bringing the interface up
2354		 */
2355	}
2356}
2357
2358/**
2359 * ice_service_task - manage and run subtasks
2360 * @work: pointer to work_struct contained by the PF struct
2361 */
2362static void ice_service_task(struct work_struct *work)
2363{
2364	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2365	unsigned long start_time = jiffies;
2366
2367	/* subtasks */
2368
2369	/* process reset requests first */
2370	ice_reset_subtask(pf);
2371
2372	/* bail if a reset/recovery cycle is pending or rebuild failed */
2373	if (ice_is_reset_in_progress(pf->state) ||
2374	    test_bit(ICE_SUSPENDED, pf->state) ||
2375	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2376		ice_service_task_complete(pf);
2377		return;
2378	}
2379
2380	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2381		struct iidc_event *event;
2382
2383		event = kzalloc(sizeof(*event), GFP_KERNEL);
2384		if (event) {
2385			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2386			/* report the entire OICR value to AUX driver */
2387			swap(event->reg, pf->oicr_err_reg);
2388			ice_send_event_to_aux(pf, event);
2389			kfree(event);
2390		}
2391	}
2392
2393	/* unplug aux dev per request; if an unplug request came in
2394	 * while processing a plug request, this will handle it
2395	 */
2396	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2397		ice_unplug_aux_dev(pf);
2398
2399	/* Plug aux device per request */
2400	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2401		ice_plug_aux_dev(pf);
2402
2403	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2404		struct iidc_event *event;
2405
2406		event = kzalloc(sizeof(*event), GFP_KERNEL);
2407		if (event) {
2408			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2409			ice_send_event_to_aux(pf, event);
2410			kfree(event);
2411		}
2412	}
2413
2414	ice_clean_adminq_subtask(pf);
2415	ice_check_media_subtask(pf);
2416	ice_check_for_hang_subtask(pf);
2417	ice_sync_fltr_subtask(pf);
2418	ice_handle_mdd_event(pf);
2419	ice_watchdog_subtask(pf);
2420
2421	if (ice_is_safe_mode(pf)) {
2422		ice_service_task_complete(pf);
2423		return;
2424	}
2425
2426	ice_process_vflr_event(pf);
2427	ice_clean_mailboxq_subtask(pf);
2428	ice_clean_sbq_subtask(pf);
2429	ice_sync_arfs_fltrs(pf);
2430	ice_flush_fdir_ctx(pf);
2431
2432	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2433	ice_service_task_complete(pf);
2434
2435	/* If the tasks have taken longer than one service timer period
2436	 * or there is more work to be done, reset the service timer to
2437	 * schedule the service task now.
2438	 */
2439	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2440	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2441	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2442	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2443	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2444	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2445	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2446		mod_timer(&pf->serv_tmr, jiffies);
2447}
2448
2449/**
2450 * ice_set_ctrlq_len - helper function to set controlq length
2451 * @hw: pointer to the HW instance
2452 */
2453static void ice_set_ctrlq_len(struct ice_hw *hw)
2454{
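	/* The mailbox Rx queue is sized to the maximum the ARQLEN register
	 * field can express, which helps absorb bursts of VF messages.
	 */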
2455	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2456	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2457	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2458	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2459	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2460	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2461	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2462	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2463	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2464	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2465	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2466	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2467}
2468
2469/**
2470 * ice_schedule_reset - schedule a reset
2471 * @pf: board private structure
2472 * @reset: reset being requested
2473 */
2474int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2475{
2476	struct device *dev = ice_pf_to_dev(pf);
2477
2478	/* bail out if earlier reset has failed */
2479	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2480		dev_dbg(dev, "earlier reset has failed\n");
2481		return -EIO;
2482	}
2483	/* bail if reset/recovery already in progress */
2484	if (ice_is_reset_in_progress(pf->state)) {
2485		dev_dbg(dev, "Reset already in progress\n");
2486		return -EBUSY;
2487	}
2488
2489	switch (reset) {
2490	case ICE_RESET_PFR:
2491		set_bit(ICE_PFR_REQ, pf->state);
2492		break;
2493	case ICE_RESET_CORER:
2494		set_bit(ICE_CORER_REQ, pf->state);
2495		break;
2496	case ICE_RESET_GLOBR:
2497		set_bit(ICE_GLOBR_REQ, pf->state);
2498		break;
2499	default:
2500		return -EINVAL;
2501	}
2502
2503	ice_service_task_schedule(pf);
2504	return 0;
2505}
2506
2507/**
2508 * ice_irq_affinity_notify - Callback for affinity changes
2509 * @notify: context as to what irq was changed
2510 * @mask: the new affinity mask
2511 *
2512 * This is a callback function used by the irq_set_affinity_notifier function
2513 * so that we may register to receive changes to the irq affinity masks.
2514 */
2515static void
2516ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2517			const cpumask_t *mask)
2518{
2519	struct ice_q_vector *q_vector =
2520		container_of(notify, struct ice_q_vector, affinity_notify);
2521
2522	cpumask_copy(&q_vector->affinity_mask, mask);
2523}
2524
2525/**
2526 * ice_irq_affinity_release - Callback for affinity notifier release
2527 * @ref: internal core kernel usage
2528 *
2529 * This is a callback function used by the irq_set_affinity_notifier function
2530 * to inform the current notification subscriber that they will no longer
2531 * receive notifications.
2532 */
2533static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2534
2535/**
2536 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2537 * @vsi: the VSI being configured
2538 */
2539static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2540{
2541	struct ice_hw *hw = &vsi->back->hw;
2542	int i;
2543
2544	ice_for_each_q_vector(vsi, i)
2545		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2546
2547	ice_flush(hw);
2548	return 0;
2549}
2550
2551/**
2552 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2553 * @vsi: the VSI being configured
2554 * @basename: name for the vector
2555 */
2556static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2557{
2558	int q_vectors = vsi->num_q_vectors;
2559	struct ice_pf *pf = vsi->back;
2560	struct device *dev;
2561	int rx_int_idx = 0;
2562	int tx_int_idx = 0;
2563	int vector, err;
2564	int irq_num;
2565
2566	dev = ice_pf_to_dev(pf);
2567	for (vector = 0; vector < q_vectors; vector++) {
2568		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2569
2570		irq_num = q_vector->irq.virq;
2571
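		/* name the vector after what it services; a combined TxRx
		 * vector consumes an index from both counters so the
		 * remaining tx-only/rx-only names stay in step
		 */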
2572		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2573			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2574				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2575			tx_int_idx++;
2576		} else if (q_vector->rx.rx_ring) {
2577			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2578				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2579		} else if (q_vector->tx.tx_ring) {
2580			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2581				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2582		} else {
2583			/* skip this unused q_vector */
2584			continue;
2585		}
2586		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2587			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2588					       IRQF_SHARED, q_vector->name,
2589					       q_vector);
2590		else
2591			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2592					       0, q_vector->name, q_vector);
2593		if (err) {
2594			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2595				   err);
2596			goto free_q_irqs;
2597		}
2598
2599		/* register for affinity change notifications */
2600		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2601			struct irq_affinity_notify *affinity_notify;
2602
2603			affinity_notify = &q_vector->affinity_notify;
2604			affinity_notify->notify = ice_irq_affinity_notify;
2605			affinity_notify->release = ice_irq_affinity_release;
2606			irq_set_affinity_notifier(irq_num, affinity_notify);
2607		}
2608
2609		/* assign the mask for this irq */
2610		irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
2611	}
2612
2613	err = ice_set_cpu_rx_rmap(vsi);
2614	if (err) {
2615		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2616			   vsi->vsi_num, ERR_PTR(err));
2617		goto free_q_irqs;
2618	}
2619
2620	vsi->irqs_ready = true;
2621	return 0;
2622
2623free_q_irqs:
2624	while (vector--) {
2625		irq_num = vsi->q_vectors[vector]->irq.virq;
2626		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2627			irq_set_affinity_notifier(irq_num, NULL);
2628		irq_update_affinity_hint(irq_num, NULL);
2629		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2630	}
2631	return err;
2632}
2633
2634/**
2635 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2636 * @vsi: VSI to setup Tx rings used by XDP
2637 *
2638 * Return 0 on success and negative value on error
2639 */
2640static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2641{
2642	struct device *dev = ice_pf_to_dev(vsi->back);
2643	struct ice_tx_desc *tx_desc;
2644	int i, j;
2645
2646	ice_for_each_xdp_txq(vsi, i) {
2647		u16 xdp_q_idx = vsi->alloc_txq + i;
2648		struct ice_ring_stats *ring_stats;
2649		struct ice_tx_ring *xdp_ring;
2650
2651		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2652		if (!xdp_ring)
2653			goto free_xdp_rings;
2654
2655		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2656		if (!ring_stats) {
2657			ice_free_tx_ring(xdp_ring);
2658			goto free_xdp_rings;
2659		}
2660
2661		xdp_ring->ring_stats = ring_stats;
2662		xdp_ring->q_index = xdp_q_idx;
2663		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2664		xdp_ring->vsi = vsi;
2665		xdp_ring->netdev = NULL;
2666		xdp_ring->dev = dev;
2667		xdp_ring->count = vsi->num_tx_desc;
2668		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2669		if (ice_setup_tx_ring(xdp_ring))
2670			goto free_xdp_rings;
2671		ice_set_ring_xdp(xdp_ring);
2672		spin_lock_init(&xdp_ring->tx_lock);
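		/* start from fully zeroed descriptors so stale done bits from
		 * a previous ring user cannot be mistaken for completed work
		 */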
2673		for (j = 0; j < xdp_ring->count; j++) {
2674			tx_desc = ICE_TX_DESC(xdp_ring, j);
2675			tx_desc->cmd_type_offset_bsz = 0;
2676		}
2677	}
2678
2679	return 0;
2680
2681free_xdp_rings:
2682	for (; i >= 0; i--) {
2683		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2684			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2685			vsi->xdp_rings[i]->ring_stats = NULL;
2686			ice_free_tx_ring(vsi->xdp_rings[i]);
2687		}
2688	}
2689	return -ENOMEM;
2690}
2691
2692/**
2693 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2694 * @vsi: VSI to set the bpf prog on
2695 * @prog: the bpf prog pointer
2696 */
2697static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2698{
2699	struct bpf_prog *old_prog;
2700	int i;
2701
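	/* swap the prog atomically, propagate it to every Rx ring, then
	 * drop our reference on the old prog (if any)
	 */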
2702	old_prog = xchg(&vsi->xdp_prog, prog);
2703	ice_for_each_rxq(vsi, i)
2704		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2705
2706	if (old_prog)
2707		bpf_prog_put(old_prog);
2708}
2709
2710static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2711{
2712	struct ice_q_vector *q_vector;
2713	struct ice_tx_ring *ring;
2714
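	/* With the locking key enabled there are fewer XDP rings than CPUs
	 * and rings are shared, so pick one by simple modulo; otherwise each
	 * Rx queue's vector carries its own dedicated XDP Tx ring.
	 */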
2715	if (static_key_enabled(&ice_xdp_locking_key))
2716		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2717
2718	q_vector = vsi->rx_rings[qid]->q_vector;
2719	ice_for_each_tx_ring(ring, q_vector->tx)
2720		if (ice_ring_is_xdp(ring))
2721			return ring;
2722
2723	return NULL;
2724}
2725
2726/**
2727 * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2728 * @vsi: the VSI with XDP rings being configured
2729 *
2730 * Map XDP rings to interrupt vectors and perform the configuration steps
2731 * dependent on the mapping.
2732 */
2733void ice_map_xdp_rings(struct ice_vsi *vsi)
2734{
2735	int xdp_rings_rem = vsi->num_xdp_txq;
2736	int v_idx, q_idx;
2737
2738	/* follow the logic from ice_vsi_map_rings_to_vectors */
2739	ice_for_each_q_vector(vsi, v_idx) {
2740		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2741		int xdp_rings_per_v, q_id, q_base;
2742
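		/* spread the remaining rings as evenly as possible over the
		 * remaining vectors; DIV_ROUND_UP gives earlier vectors at
		 * most one extra ring
		 */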
2743		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2744					       vsi->num_q_vectors - v_idx);
2745		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2746
2747		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2748			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2749
2750			xdp_ring->q_vector = q_vector;
2751			xdp_ring->next = q_vector->tx.tx_ring;
2752			q_vector->tx.tx_ring = xdp_ring;
2753		}
2754		xdp_rings_rem -= xdp_rings_per_v;
2755	}
2756
2757	ice_for_each_rxq(vsi, q_idx) {
2758		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2759								       q_idx);
2760		ice_tx_xsk_pool(vsi, q_idx);
2761	}
2762}
2763
2764/**
2765 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2766 * @vsi: VSI to bring up Tx rings used by XDP
2767 * @prog: bpf program that will be assigned to VSI
2768 * @cfg_type: create from scratch or restore the existing configuration
2769 *
2770 * Return 0 on success and negative value on error
2771 */
2772int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2773			  enum ice_xdp_cfg cfg_type)
2774{
2775	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2776	struct ice_pf *pf = vsi->back;
2777	struct ice_qs_cfg xdp_qs_cfg = {
2778		.qs_mutex = &pf->avail_q_mutex,
2779		.pf_map = pf->avail_txqs,
2780		.pf_map_size = pf->max_pf_txqs,
2781		.q_count = vsi->num_xdp_txq,
2782		.scatter_count = ICE_MAX_SCATTER_TXQS,
2783		.vsi_map = vsi->txq_map,
2784		.vsi_map_offset = vsi->alloc_txq,
2785		.mapping_mode = ICE_VSI_MAP_CONTIG
2786	};
2787	struct device *dev;
2788	int status, i;
2789
2790	dev = ice_pf_to_dev(pf);
2791	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2792				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2793	if (!vsi->xdp_rings)
2794		return -ENOMEM;
2795
2796	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2797	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2798		goto err_map_xdp;
2799
2800	if (static_key_enabled(&ice_xdp_locking_key))
2801		netdev_warn(vsi->netdev,
2802			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2803
2804	if (ice_xdp_alloc_setup_rings(vsi))
2805		goto clear_xdp_rings;
2806
2807	/* omit the scheduler update if in reset path; XDP queues will be
2808	 * taken into account at the end of ice_vsi_rebuild, where
2809	 * ice_cfg_vsi_lan is being called
2810	 */
2811	if (cfg_type == ICE_XDP_CFG_PART)
2812		return 0;
2813
2814	ice_map_xdp_rings(vsi);
2815
2816	/* tell the Tx scheduler that right now we have
2817	 * additional queues
2818	 */
2819	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2820		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2821
2822	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2823				 max_txqs);
2824	if (status) {
2825		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2826			status);
2827		goto clear_xdp_rings;
2828	}
2829
2830	/* assign the prog only when it's not already present on VSI;
2831	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2832	 * VSI rebuild that happens under ethtool -L can expose us to
2833	 * the bpf_prog refcount issues as we would be swapping same
2834	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2835	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2836	 * this is not harmful as dev_xdp_install bumps the refcount
2837	 * before calling the op exposed by the driver;
2838	 */
2839	if (!ice_is_xdp_ena_vsi(vsi))
2840		ice_vsi_assign_bpf_prog(vsi, prog);
2841
2842	return 0;
2843clear_xdp_rings:
2844	ice_for_each_xdp_txq(vsi, i)
2845		if (vsi->xdp_rings[i]) {
2846			kfree_rcu(vsi->xdp_rings[i], rcu);
2847			vsi->xdp_rings[i] = NULL;
2848		}
2849
2850err_map_xdp:
2851	mutex_lock(&pf->avail_q_mutex);
2852	ice_for_each_xdp_txq(vsi, i) {
2853		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2854		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2855	}
2856	mutex_unlock(&pf->avail_q_mutex);
2857
2858	devm_kfree(dev, vsi->xdp_rings);
2859	return -ENOMEM;
2860}
2861
2862/**
2863 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2864 * @vsi: VSI to remove XDP rings
2865 * @cfg_type: disable XDP permanently or allow it to be restored later
2866 *
2867 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2868 * resources
2869 */
2870int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2871{
2872	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2873	struct ice_pf *pf = vsi->back;
2874	int i, v_idx;
2875
2876	/* q_vectors are freed in reset path so there's no point in detaching
2877	 * rings
2878	 */
2879	if (cfg_type == ICE_XDP_CFG_PART)
2880		goto free_qmap;
2881
2882	ice_for_each_q_vector(vsi, v_idx) {
2883		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2884		struct ice_tx_ring *ring;
2885
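		/* XDP rings were pushed onto the head of the vector's Tx
		 * ring list, so the first non-XDP ring found is the
		 * original list head.
		 */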
2886		ice_for_each_tx_ring(ring, q_vector->tx)
2887			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2888				break;
2889
2890		/* restore the value of last node prior to XDP setup */
2891		q_vector->tx.tx_ring = ring;
2892	}
2893
2894free_qmap:
2895	mutex_lock(&pf->avail_q_mutex);
2896	ice_for_each_xdp_txq(vsi, i) {
2897		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2898		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2899	}
2900	mutex_unlock(&pf->avail_q_mutex);
2901
2902	ice_for_each_xdp_txq(vsi, i)
2903		if (vsi->xdp_rings[i]) {
2904			if (vsi->xdp_rings[i]->desc) {
2905				synchronize_rcu();
2906				ice_free_tx_ring(vsi->xdp_rings[i]);
2907			}
2908			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2909			vsi->xdp_rings[i]->ring_stats = NULL;
2910			kfree_rcu(vsi->xdp_rings[i], rcu);
2911			vsi->xdp_rings[i] = NULL;
2912		}
2913
2914	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2915	vsi->xdp_rings = NULL;
2916
2917	if (static_key_enabled(&ice_xdp_locking_key))
2918		static_branch_dec(&ice_xdp_locking_key);
2919
2920	if (cfg_type == ICE_XDP_CFG_PART)
2921		return 0;
2922
2923	ice_vsi_assign_bpf_prog(vsi, NULL);
2924
2925	/* notify Tx scheduler that we destroyed XDP queues and bring
2926	 * back the old number of child nodes
2927	 */
2928	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2929		max_txqs[i] = vsi->num_txq;
2930
2931	/* change number of XDP Tx queues to 0 */
2932	vsi->num_xdp_txq = 0;
2933
2934	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2935			       max_txqs);
2936}
2937
2938/**
2939 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2940 * @vsi: VSI to schedule napi on
2941 */
2942static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2943{
2944	int i;
2945
2946	ice_for_each_rxq(vsi, i) {
2947		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2948
2949		if (READ_ONCE(rx_ring->xsk_pool))
2950			napi_schedule(&rx_ring->q_vector->napi);
2951	}
2952}
2953
2954/**
2955 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2956 * @vsi: VSI to determine the count of XDP Tx qs
2957 *
2958 * Returns 0 if the available Tx queue count is at least half the CPU count,
2959 * -ENOMEM otherwise.
2960 */
2961int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2962{
2963	u16 avail = ice_get_avail_txq_count(vsi->back);
2964	u16 cpus = num_possible_cpus();
2965
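	/* refuse XDP entirely if fewer than half the CPUs' worth of Tx
	 * queues are still available
	 */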
2966	if (avail < cpus / 2)
2967		return -ENOMEM;
2968
2969	if (vsi->type == ICE_VSI_SF)
2970		avail = vsi->alloc_txq;
2971
2972	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2973
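	/* with fewer rings than CPUs, rings may be shared between CPUs and
	 * XDP Tx must take the per-ring spinlock
	 */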
2974	if (vsi->num_xdp_txq < cpus)
2975		static_branch_inc(&ice_xdp_locking_key);
2976
2977	return 0;
2978}
2979
2980/**
2981 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2982 * @vsi: Pointer to VSI structure
2983 */
2984static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2985{
2986	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2987		return ICE_RXBUF_1664;
2988	else
2989		return ICE_RXBUF_3072;
2990}
2991
2992/**
2993 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2994 * @vsi: VSI to setup XDP for
2995 * @prog: XDP program
2996 * @extack: netlink extended ack
2997 */
2998static int
2999ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
3000		   struct netlink_ext_ack *extack)
3001{
3002	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3003	int ret = 0, xdp_ring_err = 0;
3004	bool if_running;
3005
3006	if (prog && !prog->aux->xdp_has_frags) {
3007		if (frame_size > ice_max_xdp_frame_size(vsi)) {
3008			NL_SET_ERR_MSG_MOD(extack,
3009					   "MTU is too large for linear frames and XDP prog does not support frags");
3010			return -EOPNOTSUPP;
3011		}
3012	}
3013
3014	/* hot swap progs and avoid toggling link */
3015	if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
3016	    test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
3017		ice_vsi_assign_bpf_prog(vsi, prog);
3018		return 0;
3019	}
3020
3021	if_running = netif_running(vsi->netdev) &&
3022		     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
3023
3024	/* need to stop netdev while setting up the program for Rx rings */
3025	if (if_running) {
3026		ret = ice_down(vsi);
3027		if (ret) {
3028			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3029			return ret;
3030		}
3031	}
3032
3033	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3034		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3035		if (xdp_ring_err) {
3036			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3037		} else {
3038			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3039							     ICE_XDP_CFG_FULL);
3040			if (xdp_ring_err)
3041				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3042		}
3043		xdp_features_set_redirect_target(vsi->netdev, true);
3044		/* reallocate Rx queues that are used for zero-copy */
3045		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3046		if (xdp_ring_err)
3047			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3048	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3049		xdp_features_clear_redirect_target(vsi->netdev);
3050		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3051		if (xdp_ring_err)
3052			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3053		/* reallocate Rx queues that were used for zero-copy */
3054		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3055		if (xdp_ring_err)
3056			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3057	}
3058
3059	if (if_running)
3060		ret = ice_up(vsi);
3061
3062	if (!ret && prog)
3063		ice_vsi_rx_napi_schedule(vsi);
3064
3065	return (ret || xdp_ring_err) ? -ENOMEM : 0;
3066}
3067
3068/**
3069 * ice_xdp_safe_mode - XDP handler for safe mode
3070 * @dev: netdevice
3071 * @xdp: XDP command
3072 */
3073static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3074			     struct netdev_bpf *xdp)
3075{
3076	NL_SET_ERR_MSG_MOD(xdp->extack,
3077			   "Please provide working DDP firmware package in order to use XDP\n"
3078			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3079	return -EOPNOTSUPP;
3080}
3081
3082/**
3083 * ice_xdp - implements XDP handler
3084 * @dev: netdevice
3085 * @xdp: XDP command
3086 */
3087int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3088{
3089	struct ice_netdev_priv *np = netdev_priv(dev);
3090	struct ice_vsi *vsi = np->vsi;
3091	int ret;
3092
3093	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
3094		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
3095		return -EINVAL;
3096	}
3097
3098	mutex_lock(&vsi->xdp_state_lock);
3099
3100	switch (xdp->command) {
3101	case XDP_SETUP_PROG:
3102		ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3103		break;
3104	case XDP_SETUP_XSK_POOL:
3105		ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
3106		break;
3107	default:
3108		ret = -EINVAL;
3109	}
3110
3111	mutex_unlock(&vsi->xdp_state_lock);
3112	return ret;
3113}
3114
3115/**
3116 * ice_ena_misc_vector - enable the non-queue interrupts
3117 * @pf: board private structure
3118 */
3119static void ice_ena_misc_vector(struct ice_pf *pf)
3120{
3121	struct ice_hw *hw = &pf->hw;
3122	u32 pf_intr_start_offset;
3123	u32 val;
3124
3125	/* Disable anti-spoof detection interrupt to prevent spurious event
3126	 * interrupts during a function reset. Anti-spoof functionality is
3127	 * still supported.
3128	 */
3129	val = rd32(hw, GL_MDCK_TX_TDPU);
3130	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3131	wr32(hw, GL_MDCK_TX_TDPU, val);
3132
3133	/* clear things first */
3134	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3135	rd32(hw, PFINT_OICR);		/* read to clear */
3136
3137	val = (PFINT_OICR_ECC_ERR_M |
3138	       PFINT_OICR_MAL_DETECT_M |
3139	       PFINT_OICR_GRST_M |
3140	       PFINT_OICR_PCI_EXCEPTION_M |
3141	       PFINT_OICR_VFLR_M |
3142	       PFINT_OICR_HMC_ERR_M |
3143	       PFINT_OICR_PE_PUSH_M |
3144	       PFINT_OICR_PE_CRITERR_M);
3145
3146	wr32(hw, PFINT_OICR_ENA, val);
3147
3148	/* SW_ITR_IDX = 0, but don't change INTENA */
3149	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3150	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3151
3152	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3153		return;
3154	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3155	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3156	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3157}
3158
3159/**
3160 * ice_ll_ts_intr - ll_ts interrupt handler
3161 * @irq: interrupt number
3162 * @data: pointer to the PF structure
3163 */
3164static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3165{
3166	struct ice_pf *pf = data;
3167	u32 pf_intr_start_offset;
3168	struct ice_ptp_tx *tx;
3169	unsigned long flags;
3170	struct ice_hw *hw;
3171	u32 val;
3172	u8 idx;
3173
3174	hw = &pf->hw;
3175	tx = &pf->ptp.port.tx;
3176	spin_lock_irqsave(&tx->lock, flags);
3177	ice_ptp_complete_tx_single_tstamp(tx);
3178
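	/* if more timestamps are outstanding, request the next one right
	 * away, continuing round-robin after the last index read
	 */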
3179	idx = find_next_bit_wrap(tx->in_use, tx->len,
3180				 tx->last_ll_ts_idx_read + 1);
3181	if (idx != tx->len)
3182		ice_ptp_req_tx_single_tstamp(tx, idx);
3183	spin_unlock_irqrestore(&tx->lock, flags);
3184
3185	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3186	      (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3187	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3188	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3189	     val);
3190
3191	return IRQ_HANDLED;
3192}
3193
3194/**
3195 * ice_misc_intr - misc interrupt handler
3196 * @irq: interrupt number
3197 * @data: pointer to the PF structure
3198 */
3199static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3200{
3201	struct ice_pf *pf = (struct ice_pf *)data;
3202	irqreturn_t ret = IRQ_HANDLED;
3203	struct ice_hw *hw = &pf->hw;
3204	struct device *dev;
3205	u32 oicr, ena_mask;
3206
3207	dev = ice_pf_to_dev(pf);
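	/* OICR does not identify which control queue fired, so mark all
	 * three as pending and let the service task subtasks sort it out
	 */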
3208	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3209	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3210	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3211
3212	oicr = rd32(hw, PFINT_OICR);
3213	ena_mask = rd32(hw, PFINT_OICR_ENA);
3214
3215	if (oicr & PFINT_OICR_SWINT_M) {
3216		ena_mask &= ~PFINT_OICR_SWINT_M;
3217		pf->sw_int_count++;
3218	}
3219
3220	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3221		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3222		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3223	}
3224	if (oicr & PFINT_OICR_VFLR_M) {
3225		/* disable any further VFLR event notifications */
3226		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3227			u32 reg = rd32(hw, PFINT_OICR_ENA);
3228
3229			reg &= ~PFINT_OICR_VFLR_M;
3230			wr32(hw, PFINT_OICR_ENA, reg);
3231		} else {
3232			ena_mask &= ~PFINT_OICR_VFLR_M;
3233			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3234		}
3235	}
3236
3237	if (oicr & PFINT_OICR_GRST_M) {
3238		u32 reset;
3239
3240		/* we have a reset warning */
3241		ena_mask &= ~PFINT_OICR_GRST_M;
3242		reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3243				  rd32(hw, GLGEN_RSTAT));
3244
3245		if (reset == ICE_RESET_CORER)
3246			pf->corer_count++;
3247		else if (reset == ICE_RESET_GLOBR)
3248			pf->globr_count++;
3249		else if (reset == ICE_RESET_EMPR)
3250			pf->empr_count++;
3251		else
3252			dev_dbg(dev, "Invalid reset type %d\n", reset);
3253
3254		/* If a reset cycle isn't already in progress, we set a bit in
3255		 * pf->state so that the service task can start a reset/rebuild.
3256		 */
3257		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3258			if (reset == ICE_RESET_CORER)
3259				set_bit(ICE_CORER_RECV, pf->state);
3260			else if (reset == ICE_RESET_GLOBR)
3261				set_bit(ICE_GLOBR_RECV, pf->state);
3262			else
3263				set_bit(ICE_EMPR_RECV, pf->state);
3264
3265			/* There are a couple of different bits at play here.
3266			 * hw->reset_ongoing indicates whether the hardware is
3267			 * in reset. This is set to true when a reset interrupt
3268			 * is received and set back to false after the driver
3269			 * has determined that the hardware is out of reset.
3270			 *
3271			 * ICE_RESET_OICR_RECV in pf->state indicates
3272			 * that a post reset rebuild is required before the
3273			 * driver is operational again. This is set above.
3274			 *
3275			 * As this is the start of the reset/rebuild cycle, set
3276			 * both to indicate that.
3277			 */
3278			hw->reset_ongoing = true;
3279		}
3280	}
3281
3282	if (oicr & PFINT_OICR_TSYN_TX_M) {
3283		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3284		if (ice_pf_state_is_nominal(pf) &&
3285		    pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3286			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3287			unsigned long flags;
3288			u8 idx;
3289
3290			spin_lock_irqsave(&tx->lock, flags);
3291			idx = find_next_bit_wrap(tx->in_use, tx->len,
3292						 tx->last_ll_ts_idx_read + 1);
3293			if (idx != tx->len)
3294				ice_ptp_req_tx_single_tstamp(tx, idx);
3295			spin_unlock_irqrestore(&tx->lock, flags);
3296		} else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3297			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3298			ret = IRQ_WAKE_THREAD;
3299		}
3300	}
3301
3302	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3303		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3304		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3305
3306		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3307
3308		if (ice_pf_src_tmr_owned(pf)) {
3309			/* Save EVENTs from GLTSYN register */
3310			pf->ptp.ext_ts_irq |= gltsyn_stat &
3311					      (GLTSYN_STAT_EVENT0_M |
3312					       GLTSYN_STAT_EVENT1_M |
3313					       GLTSYN_STAT_EVENT2_M);
3314
3315			ice_ptp_extts_event(pf);
3316		}
3317	}
3318
3319#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3320	if (oicr & ICE_AUX_CRIT_ERR) {
3321		pf->oicr_err_reg |= oicr;
3322		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3323		ena_mask &= ~ICE_AUX_CRIT_ERR;
3324	}
3325
3326	/* Report any remaining unexpected interrupts */
3327	oicr &= ena_mask;
3328	if (oicr) {
3329		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3330		/* If a critical error is pending there is no choice but to
3331		 * reset the device.
3332		 */
3333		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3334			    PFINT_OICR_ECC_ERR_M)) {
3335			set_bit(ICE_PFR_REQ, pf->state);
3336		}
3337	}
3338	ice_service_task_schedule(pf);
3339	if (ret == IRQ_HANDLED)
3340		ice_irq_dynamic_ena(hw, NULL, NULL);
3341
3342	return ret;
3343}
3344
3345/**
3346 * ice_misc_intr_thread_fn - misc interrupt thread function
3347 * @irq: interrupt number
3348 * @data: pointer to the PF structure
3349 */
3350static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3351{
3352	struct ice_pf *pf = data;
3353	struct ice_hw *hw;
3354
3355	hw = &pf->hw;
3356
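	/* skip the Tx timestamp work during reset; just re-enable the
	 * interrupt and exit
	 */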
3357	if (ice_is_reset_in_progress(pf->state))
3358		goto skip_irq;
3359
3360	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3361		/* Process outstanding Tx timestamps. If there is more work,
3362		 * re-arm the interrupt to trigger again.
3363		 */
3364		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3365			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3366			ice_flush(hw);
3367		}
3368	}
3369
3370skip_irq:
3371	ice_irq_dynamic_ena(hw, NULL, NULL);
3372
3373	return IRQ_HANDLED;
3374}
3375
3376/**
3377 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3378 * @hw: pointer to HW structure
3379 */
3380static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3381{
3382	/* disable Admin queue Interrupt causes */
3383	wr32(hw, PFINT_FW_CTL,
3384	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3385
3386	/* disable Mailbox queue Interrupt causes */
3387	wr32(hw, PFINT_MBX_CTL,
3388	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3389
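	/* disable Sideband queue Interrupt causes */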
3390	wr32(hw, PFINT_SB_CTL,
3391	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3392
3393	/* disable Control queue Interrupt causes */
3394	wr32(hw, PFINT_OICR_CTL,
3395	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3396
3397	ice_flush(hw);
3398}
3399
3400/**
3401 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3402 * @pf: board private structure
3403 */
3404static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3405{
3406	int irq_num = pf->ll_ts_irq.virq;
3407
3408	synchronize_irq(irq_num);
3409	devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3410
3411	ice_free_irq(pf, pf->ll_ts_irq);
3412}
3413
3414/**
3415 * ice_free_irq_msix_misc - Unroll misc vector setup
3416 * @pf: board private structure
3417 */
3418static void ice_free_irq_msix_misc(struct ice_pf *pf)
3419{
3420	int misc_irq_num = pf->oicr_irq.virq;
3421	struct ice_hw *hw = &pf->hw;
3422
3423	ice_dis_ctrlq_interrupts(hw);
3424
3425	/* disable OICR interrupt */
3426	wr32(hw, PFINT_OICR_ENA, 0);
3427	ice_flush(hw);
3428
3429	synchronize_irq(misc_irq_num);
3430	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3431
3432	ice_free_irq(pf, pf->oicr_irq);
3433	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3434		ice_free_irq_msix_ll_ts(pf);
3435}
3436
3437/**
3438 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3439 * @hw: pointer to HW structure
3440 * @reg_idx: HW vector index to associate the control queue interrupts with
3441 */
3442static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3443{
3444	u32 val;
3445
3446	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3447	       PFINT_OICR_CTL_CAUSE_ENA_M);
3448	wr32(hw, PFINT_OICR_CTL, val);
3449
3450	/* enable Admin queue Interrupt causes */
3451	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3452	       PFINT_FW_CTL_CAUSE_ENA_M);
3453	wr32(hw, PFINT_FW_CTL, val);
3454
3455	/* enable Mailbox queue Interrupt causes */
3456	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3457	       PFINT_MBX_CTL_CAUSE_ENA_M);
3458	wr32(hw, PFINT_MBX_CTL, val);
3459
3460	if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3461		/* enable Sideband queue Interrupt causes */
3462		val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3463		       PFINT_SB_CTL_CAUSE_ENA_M);
3464		wr32(hw, PFINT_SB_CTL, val);
3465	}
3466
3467	ice_flush(hw);
3468}
3469
3470/**
3471 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3472 * @pf: board private structure
3473 *
3474 * This sets up the handler for MSIX 0, which is used to manage the
3475 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3476 * when in MSI or Legacy interrupt mode.
3477 */
3478static int ice_req_irq_msix_misc(struct ice_pf *pf)
3479{
3480	struct device *dev = ice_pf_to_dev(pf);
3481	struct ice_hw *hw = &pf->hw;
3482	u32 pf_intr_start_offset;
3483	struct msi_map irq;
3484	int err = 0;
3485
3486	if (!pf->int_name[0])
3487		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3488			 dev_driver_string(dev), dev_name(dev));
3489
3490	if (!pf->int_name_ll_ts[0])
3491		snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3492			 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3493	/* Do not request IRQ but do enable OICR interrupt since settings are
3494	 * lost during reset. Note that this function is called only during
3495	 * rebuild path and not while reset is in progress.
3496	 */
3497	if (ice_is_reset_in_progress(pf->state))
3498		goto skip_req_irq;
3499
3500	/* reserve one vector in irq_tracker for misc interrupts */
3501	irq = ice_alloc_irq(pf, false);
3502	if (irq.index < 0)
3503		return irq.index;
3504
3505	pf->oicr_irq = irq;
3506	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3507					ice_misc_intr_thread_fn, 0,
3508					pf->int_name, pf);
3509	if (err) {
3510		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3511			pf->int_name, err);
3512		ice_free_irq(pf, pf->oicr_irq);
3513		return err;
3514	}
3515
3516	/* reserve one vector in irq_tracker for ll_ts interrupt */
3517	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3518		goto skip_req_irq;
3519
3520	irq = ice_alloc_irq(pf, false);
3521	if (irq.index < 0)
3522		return irq.index;
3523
3524	pf->ll_ts_irq = irq;
3525	err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3526			       pf->int_name_ll_ts, pf);
3527	if (err) {
3528		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3529			pf->int_name_ll_ts, err);
3530		ice_free_irq(pf, pf->ll_ts_irq);
3531		return err;
3532	}
3533
3534skip_req_irq:
3535	ice_ena_misc_vector(pf);
3536
3537	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3538	/* This enables LL TS interrupt */
3539	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3540	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3541		wr32(hw, PFINT_SB_CTL,
3542		     ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3543		      PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3544	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3545	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3546
3547	ice_flush(hw);
3548	ice_irq_dynamic_ena(hw, NULL, NULL);
3549
3550	return 0;
3551}
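
/* Illustrative sketch (editorial addition, not upstream driver code): the
 * devm_request_threaded_irq() call above splits handling into a hard handler
 * that must not sleep and a thread that may. A minimal hypothetical pair
 * looks like this:
 */
static __maybe_unused irqreturn_t example_hard_handler(int irq, void *data)
{
	/* atomic context: acknowledge the cause, then defer to the thread */
	return IRQ_WAKE_THREAD;
}

static __maybe_unused irqreturn_t example_thread_fn(int irq, void *data)
{
	/* process context: may sleep, e.g. to issue admin queue commands */
	return IRQ_HANDLED;
}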
3552
3553/**
3554 * ice_set_ops - set netdev and ethtool ops for the given netdev
3555 * @vsi: the VSI associated with the new netdev
3556 */
3557static void ice_set_ops(struct ice_vsi *vsi)
3558{
3559	struct net_device *netdev = vsi->netdev;
3560	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3561
3562	if (ice_is_safe_mode(pf)) {
3563		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3564		ice_set_ethtool_safe_mode_ops(netdev);
3565		return;
3566	}
3567
3568	netdev->netdev_ops = &ice_netdev_ops;
3569	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3570	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3571	ice_set_ethtool_ops(netdev);
3572
3573	if (vsi->type != ICE_VSI_PF)
3574		return;
3575
3576	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3577			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3578			       NETDEV_XDP_ACT_RX_SG;
3579	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3580}
3581
3582/**
3583 * ice_set_netdev_features - set features for the given netdev
3584 * @netdev: netdev instance
3585 */
3586void ice_set_netdev_features(struct net_device *netdev)
3587{
3588	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3589	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3590	netdev_features_t csumo_features;
3591	netdev_features_t vlano_features;
3592	netdev_features_t dflt_features;
3593	netdev_features_t tso_features;
3594
3595	if (ice_is_safe_mode(pf)) {
3596		/* safe mode */
3597		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3598		netdev->hw_features = netdev->features;
3599		return;
3600	}
3601
3602	dflt_features = NETIF_F_SG	|
3603			NETIF_F_HIGHDMA	|
3604			NETIF_F_NTUPLE	|
3605			NETIF_F_RXHASH;
3606
3607	csumo_features = NETIF_F_RXCSUM	  |
3608			 NETIF_F_IP_CSUM  |
3609			 NETIF_F_SCTP_CRC |
3610			 NETIF_F_IPV6_CSUM;
3611
3612	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3613			 NETIF_F_HW_VLAN_CTAG_TX     |
3614			 NETIF_F_HW_VLAN_CTAG_RX;
3615
3616	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3617	if (is_dvm_ena)
3618		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3619
3620	tso_features = NETIF_F_TSO			|
3621		       NETIF_F_TSO_ECN			|
3622		       NETIF_F_TSO6			|
3623		       NETIF_F_GSO_GRE			|
3624		       NETIF_F_GSO_UDP_TUNNEL		|
3625		       NETIF_F_GSO_GRE_CSUM		|
3626		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3627		       NETIF_F_GSO_PARTIAL		|
3628		       NETIF_F_GSO_IPXIP4		|
3629		       NETIF_F_GSO_IPXIP6		|
3630		       NETIF_F_GSO_UDP_L4;
3631
3632	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3633					NETIF_F_GSO_GRE_CSUM;
3634	/* set features that user can change */
3635	netdev->hw_features = dflt_features | csumo_features |
3636			      vlano_features | tso_features;
3637
3638	/* add support for HW_CSUM on packets with MPLS header */
3639	netdev->mpls_features =  NETIF_F_HW_CSUM |
3640				 NETIF_F_TSO     |
3641				 NETIF_F_TSO6;
3642
3643	/* enable features */
3644	netdev->features |= netdev->hw_features;
3645
3646	netdev->hw_features |= NETIF_F_HW_TC;
3647	netdev->hw_features |= NETIF_F_LOOPBACK;
3648
3649	/* encap and VLAN devices inherit default, csumo and tso features */
3650	netdev->hw_enc_features |= dflt_features | csumo_features |
3651				   tso_features;
3652	netdev->vlan_features |= dflt_features | csumo_features |
3653				 tso_features;
3654
3655	/* advertise support but don't enable by default since only one type of
3656	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3657	 * type turns on the other has to be turned off. This is enforced by the
3658	 * type turns on, the other has to be turned off. This is enforced by the
3659	 */
3660	if (is_dvm_ena)
3661		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3662			NETIF_F_HW_VLAN_STAG_TX;
3663
3664	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3665	 * be changed at runtime
3666	 */
3667	netdev->hw_features |= NETIF_F_RXFCS;
3668
3669	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3670}
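
/* Illustrative sketch (editorial addition, not upstream driver code):
 * hw_features advertises what the user may toggle (e.g. via "ethtool -K"),
 * while features holds what is currently active. A hypothetical runtime
 * check therefore tests features, not hw_features:
 */
static __maybe_unused bool example_rxfcs_active(struct net_device *netdev)
{
	return !!(netdev->features & NETIF_F_RXFCS);
}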
3671
3672/**
3673 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3674 * @lut: Lookup table
3675 * @rss_table_size: Lookup table size
3676 * @rss_size: Range of queue number for hashing
3677 */
3678void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3679{
3680	u16 i;
3681
3682	for (i = 0; i < rss_table_size; i++)
3683		lut[i] = i % rss_size;
3684}
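
/* Illustrative sketch (editorial addition, not upstream driver code):
 * spreading a 128-entry LUT over 16 queues yields the repeating pattern
 * 0, 1, ..., 15, 0, 1, ... so hashed flows land evenly across the queue
 * range. The buffer size here is hypothetical.
 */
static __maybe_unused void example_fill_default_lut(void)
{
	u8 lut[128];

	ice_fill_rss_lut(lut, 128, 16);	/* lut[i] == i % 16 */
}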
3685
3686/**
3687 * ice_pf_vsi_setup - Set up a PF VSI
3688 * @pf: board private structure
3689 * @pi: pointer to the port_info instance
3690 *
3691 * Returns pointer to the successfully allocated VSI software struct
3692 * on success, otherwise returns NULL on failure.
3693 */
3694static struct ice_vsi *
3695ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3696{
3697	struct ice_vsi_cfg_params params = {};
3698
3699	params.type = ICE_VSI_PF;
3700	params.port_info = pi;
3701	params.flags = ICE_VSI_FLAG_INIT;
3702
3703	return ice_vsi_setup(pf, &params);
3704}
3705
3706static struct ice_vsi *
3707ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3708		   struct ice_channel *ch)
3709{
3710	struct ice_vsi_cfg_params params = {};
3711
3712	params.type = ICE_VSI_CHNL;
3713	params.port_info = pi;
3714	params.ch = ch;
3715	params.flags = ICE_VSI_FLAG_INIT;
3716
3717	return ice_vsi_setup(pf, &params);
3718}
3719
3720/**
3721 * ice_ctrl_vsi_setup - Set up a control VSI
3722 * @pf: board private structure
3723 * @pi: pointer to the port_info instance
3724 *
3725 * Returns pointer to the successfully allocated VSI software struct
3726 * on success, otherwise returns NULL on failure.
3727 */
3728static struct ice_vsi *
3729ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3730{
3731	struct ice_vsi_cfg_params params = {};
3732
3733	params.type = ICE_VSI_CTRL;
3734	params.port_info = pi;
3735	params.flags = ICE_VSI_FLAG_INIT;
3736
3737	return ice_vsi_setup(pf, &params);
3738}
3739
3740/**
3741 * ice_lb_vsi_setup - Set up a loopback VSI
3742 * @pf: board private structure
3743 * @pi: pointer to the port_info instance
3744 *
3745 * Returns pointer to the successfully allocated VSI software struct
3746 * on success, otherwise returns NULL on failure.
3747 */
3748struct ice_vsi *
3749ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3750{
3751	struct ice_vsi_cfg_params params = {};
3752
3753	params.type = ICE_VSI_LB;
3754	params.port_info = pi;
3755	params.flags = ICE_VSI_FLAG_INIT;
3756
3757	return ice_vsi_setup(pf, &params);
3758}
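
/* Illustrative sketch (editorial addition, not upstream driver code): the
 * four wrappers above differ only in the fields they set; an equivalent
 * hypothetical form collapses the setup into a designated initializer:
 */
static __maybe_unused struct ice_vsi *
example_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	struct ice_vsi_cfg_params params = {
		.type = ICE_VSI_PF,
		.port_info = pi,
		.flags = ICE_VSI_FLAG_INIT,
	};

	return ice_vsi_setup(pf, &params);
}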
3759
3760/**
3761 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3762 * @netdev: network interface to be adjusted
3763 * @proto: VLAN TPID
3764 * @vid: VLAN ID to be added
3765 *
3766 * net_device_ops implementation for adding VLAN IDs
3767 */
3768int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3769{
3770	struct ice_netdev_priv *np = netdev_priv(netdev);
3771	struct ice_vsi_vlan_ops *vlan_ops;
3772	struct ice_vsi *vsi = np->vsi;
3773	struct ice_vlan vlan;
3774	int ret;
3775
3776	/* VLAN 0 is added by default during load/reset */
3777	if (!vid)
3778		return 0;
3779
3780	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3781		usleep_range(1000, 2000);
3782
3783	/* Add multicast promisc rule for the VLAN ID to be added if
3784	 * all-multicast is currently enabled.
3785	 */
3786	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3787		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3788					       ICE_MCAST_VLAN_PROMISC_BITS,
3789					       vid);
3790		if (ret)
3791			goto finish;
3792	}
3793
3794	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3795
3796	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3797	 * packets aren't pruned by the device's internal switch on Rx
3798	 */
3799	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3800	ret = vlan_ops->add_vlan(vsi, &vlan);
3801	if (ret)
3802		goto finish;
3803
3804	/* If all-multicast is currently enabled and this VLAN ID is the only one
3805	 * besides VLAN-0, we have to update the look-up type of the multicast
3806	 * promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3807	 */
3808	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3809	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3810		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3811					   ICE_MCAST_PROMISC_BITS, 0);
3812		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3813					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3814	}
3815
3816finish:
3817	clear_bit(ICE_CFG_BUSY, vsi->state);
3818
3819	return ret;
3820}
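
/* Illustrative sketch (editorial addition, not upstream driver code): the
 * ICE_CFG_BUSY loop above is a sleeping test-and-set lock over a state bit.
 * The generic shape of such a critical section is:
 */
static __maybe_unused void example_cfg_busy_section(unsigned long *state)
{
	while (test_and_set_bit(ICE_CFG_BUSY, state))
		usleep_range(1000, 2000);	/* another owner holds the bit */

	/* ... configuration work serialized by the bit ... */

	clear_bit(ICE_CFG_BUSY, state);		/* release for the next waiter */
}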
3821
3822/**
3823 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3824 * @netdev: network interface to be adjusted
3825 * @proto: VLAN TPID
3826 * @vid: VLAN ID to be removed
3827 *
3828 * net_device_ops implementation for removing VLAN IDs
3829 */
3830int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3831{
3832	struct ice_netdev_priv *np = netdev_priv(netdev);
3833	struct ice_vsi_vlan_ops *vlan_ops;
3834	struct ice_vsi *vsi = np->vsi;
3835	struct ice_vlan vlan;
3836	int ret;
3837
3838	/* don't allow removal of VLAN 0 */
3839	if (!vid)
3840		return 0;
3841
3842	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3843		usleep_range(1000, 2000);
3844
3845	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3846				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3847	if (ret) {
3848		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3849			   vsi->vsi_num);
3850		vsi->current_netdev_flags |= IFF_ALLMULTI;
3851	}
3852
3853	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3854
3855	/* Make sure VLAN delete is successful before updating VLAN
3856	 * information
3857	 */
3858	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3859	ret = vlan_ops->del_vlan(vsi, &vlan);
3860	if (ret)
3861		goto finish;
3862
3863	/* Remove multicast promisc rule for the removed VLAN ID if
3864	 * all-multicast is enabled.
3865	 */
3866	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3867		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3868					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3869
3870	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3871		/* Update look-up type of multicast promisc rule for VLAN 0
3872		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3873		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3874		 */
3875		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3876			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3877						   ICE_MCAST_VLAN_PROMISC_BITS,
3878						   0);
3879			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3880						 ICE_MCAST_PROMISC_BITS, 0);
3881		}
3882	}
3883
3884finish:
3885	clear_bit(ICE_CFG_BUSY, vsi->state);
3886
3887	return ret;
3888}
3889
3890/**
3891 * ice_rep_indr_tc_block_unbind - unbind an indirect TC block and free its private data
3892 * @cb_priv: indirection block private data
3893 */
3894static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3895{
3896	struct ice_indr_block_priv *indr_priv = cb_priv;
3897
3898	list_del(&indr_priv->list);
3899	kfree(indr_priv);
3900}
3901
3902/**
3903 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3904 * @vsi: VSI struct which has the netdev
3905 */
3906static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3907{
3908	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3909
3910	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3911				 ice_rep_indr_tc_block_unbind);
3912}
3913
3914/**
3915 * ice_tc_indir_block_register - Register TC indirect block notifications
3916 * @vsi: VSI struct which has the netdev
3917 *
3918 * Returns 0 on success, negative value on failure
3919 */
3920static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3921{
3922	struct ice_netdev_priv *np;
3923
3924	if (!vsi || !vsi->netdev)
3925		return -EINVAL;
3926
3927	np = netdev_priv(vsi->netdev);
3928
3929	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3930	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3931}
3932
3933/**
3934 * ice_get_avail_q_count - Get count of queues available for use
3935 * @pf_qmap: bitmap to get queue use count from
3936 * @lock: pointer to a mutex that protects access to pf_qmap
3937 * @size: size of the bitmap
3938 */
3939static u16
3940ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3941{
3942	unsigned long bit;
3943	u16 count = 0;
3944
3945	mutex_lock(lock);
3946	for_each_clear_bit(bit, pf_qmap, size)
3947		count++;
3948	mutex_unlock(lock);
3949
3950	return count;
3951}
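
/* Illustrative sketch (editorial addition, not upstream driver code):
 * counting clear bits one by one is equivalent to subtracting the set-bit
 * population, so a hypothetical alternative could use bitmap_weight():
 */
static __maybe_unused u16
example_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
	u16 count;

	mutex_lock(lock);
	count = size - bitmap_weight(pf_qmap, size);	/* clear bits */
	mutex_unlock(lock);

	return count;
}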
3952
3953/**
3954 * ice_get_avail_txq_count - Get count of Tx queues available for use
3955 * @pf: pointer to an ice_pf instance
3956 */
3957u16 ice_get_avail_txq_count(struct ice_pf *pf)
3958{
3959	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3960				     pf->max_pf_txqs);
3961}
3962
3963/**
3964 * ice_get_avail_rxq_count - Get count of Rx queues available for use
3965 * @pf: pointer to an ice_pf instance
3966 */
3967u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3968{
3969	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3970				     pf->max_pf_rxqs);
3971}
3972
3973/**
3974 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3975 * @pf: board private structure to initialize
3976 */
3977static void ice_deinit_pf(struct ice_pf *pf)
3978{
3979	ice_service_task_stop(pf);
3980	mutex_destroy(&pf->lag_mutex);
3981	mutex_destroy(&pf->adev_mutex);
3982	mutex_destroy(&pf->sw_mutex);
3983	mutex_destroy(&pf->tc_mutex);
3984	mutex_destroy(&pf->avail_q_mutex);
3985	mutex_destroy(&pf->vfs.table_lock);
3986
3987	if (pf->avail_txqs) {
3988		bitmap_free(pf->avail_txqs);
3989		pf->avail_txqs = NULL;
3990	}
3991
3992	if (pf->avail_rxqs) {
3993		bitmap_free(pf->avail_rxqs);
3994		pf->avail_rxqs = NULL;
3995	}
3996
3997	if (pf->ptp.clock)
3998		ptp_clock_unregister(pf->ptp.clock);
3999
4000	xa_destroy(&pf->dyn_ports);
4001	xa_destroy(&pf->sf_nums);
4002}
4003
4004/**
4005 * ice_set_pf_caps - set PF's capability flags
4006 * @pf: pointer to the PF instance
4007 */
4008static void ice_set_pf_caps(struct ice_pf *pf)
4009{
4010	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
4011
4012	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4013	if (func_caps->common_cap.rdma)
4014		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4015	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4016	if (func_caps->common_cap.dcb)
4017		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4018	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4019	if (func_caps->common_cap.sr_iov_1_1) {
4020		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4021		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
4022					      ICE_MAX_SRIOV_VFS);
4023	}
4024	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
4025	if (func_caps->common_cap.rss_table_size)
4026		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
4027
4028	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
4029	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
4030		u16 unused;
4031
4032		/* ctrl_vsi_idx will be set to a valid value when flow director
4033		 * is setup by ice_init_fdir
4034		 */
4035		pf->ctrl_vsi_idx = ICE_NO_VSI;
4036		set_bit(ICE_FLAG_FD_ENA, pf->flags);
4037		/* force guaranteed filter pool for PF */
4038		ice_alloc_fd_guar_item(&pf->hw, &unused,
4039				       func_caps->fd_fltr_guar);
4040		/* force shared filter pool for PF */
4041		ice_alloc_fd_shrd_item(&pf->hw, &unused,
4042				       func_caps->fd_fltr_best_effort);
4043	}
4044
4045	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4046	if (func_caps->common_cap.ieee_1588 &&
4047	    !(pf->hw.mac_type == ICE_MAC_E830))
4048		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4049
4050	pf->max_pf_txqs = func_caps->common_cap.num_txq;
4051	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4052}
4053
4054/**
4055 * ice_init_pf - Initialize general software structures (struct ice_pf)
4056 * @pf: board private structure to initialize
4057 */
4058static int ice_init_pf(struct ice_pf *pf)
4059{
4060	ice_set_pf_caps(pf);
4061
4062	mutex_init(&pf->sw_mutex);
4063	mutex_init(&pf->tc_mutex);
4064	mutex_init(&pf->adev_mutex);
4065	mutex_init(&pf->lag_mutex);
4066
4067	INIT_HLIST_HEAD(&pf->aq_wait_list);
4068	spin_lock_init(&pf->aq_wait_lock);
4069	init_waitqueue_head(&pf->aq_wait_queue);
4070
4071	init_waitqueue_head(&pf->reset_wait_queue);
4072
4073	/* setup service timer and periodic service task */
4074	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4075	pf->serv_tmr_period = HZ;
4076	INIT_WORK(&pf->serv_task, ice_service_task);
4077	clear_bit(ICE_SERVICE_SCHED, pf->state);
4078
4079	mutex_init(&pf->avail_q_mutex);
4080	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4081	if (!pf->avail_txqs)
4082		return -ENOMEM;
4083
4084	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4085	if (!pf->avail_rxqs) {
4086		bitmap_free(pf->avail_txqs);
4087		pf->avail_txqs = NULL;
4088		return -ENOMEM;
4089	}
4090
4091	mutex_init(&pf->vfs.table_lock);
4092	hash_init(pf->vfs.table);
4093	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
4094		wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
4095		     ICE_MBX_OVERFLOW_WATERMARK);
4096	else
4097		ice_mbx_init_snapshot(&pf->hw);
4098
4099	xa_init(&pf->dyn_ports);
4100	xa_init(&pf->sf_nums);
4101
4102	return 0;
4103}
4104
4105/**
4106 * ice_is_wol_supported - check if WoL is supported
4107 * @hw: pointer to hardware info
4108 *
4109 * Check if WoL is supported based on the HW configuration.
4110 * Returns true if NVM supports and enables WoL for this port, false otherwise
4111 */
4112bool ice_is_wol_supported(struct ice_hw *hw)
4113{
4114	u16 wol_ctrl;
4115
4116	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4117	 * word) indicates WoL is not supported on the corresponding PF ID.
4118	 */
4119	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4120		return false;
4121
4122	return !(BIT(hw->port_info->lport) & wol_ctrl);
4123}
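
/* Worked example (editorial addition): the WoL control word is a per-port
 * disable mask, one bit per lport. For a hypothetical wol_ctrl of 0x0002:
 *
 *	lport 0: BIT(0) & 0x0002 == 0 -> WoL supported
 *	lport 1: BIT(1) & 0x0002 != 0 -> WoL not supported
 */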
4124
4125/**
4126 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4127 * @vsi: VSI being changed
4128 * @new_rx: new number of Rx queues
4129 * @new_tx: new number of Tx queues
4130 * @locked: is adev device_lock held
4131 *
4132 * Only change the number of queues if new_tx or new_rx is non-zero.
4133 *
4134 * Returns 0 on success.
4135 */
4136int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4137{
4138	struct ice_pf *pf = vsi->back;
4139	int i, err = 0, timeout = 50;
4140
4141	if (!new_rx && !new_tx)
4142		return -EINVAL;
4143
4144	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4145		timeout--;
4146		if (!timeout)
4147			return -EBUSY;
4148		usleep_range(1000, 2000);
4149	}
4150
4151	if (new_tx)
4152		vsi->req_txq = (u16)new_tx;
4153	if (new_rx)
4154		vsi->req_rxq = (u16)new_rx;
4155
4156	/* set for the next time the netdev is started */
4157	if (!netif_running(vsi->netdev)) {
4158		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4159		if (err)
4160			goto rebuild_err;
4161		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4162		goto done;
4163	}
4164
4165	ice_vsi_close(vsi);
4166	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4167	if (err)
4168		goto rebuild_err;
4169
4170	ice_for_each_traffic_class(i) {
4171		if (vsi->tc_cfg.ena_tc & BIT(i))
4172			netdev_set_tc_queue(vsi->netdev,
4173					    vsi->tc_cfg.tc_info[i].netdev_tc,
4174					    vsi->tc_cfg.tc_info[i].qcount_tx,
4175					    vsi->tc_cfg.tc_info[i].qoffset);
4176	}
4177	ice_pf_dcb_recfg(pf, locked);
4178	ice_vsi_open(vsi);
4179	goto done;
4180
4181rebuild_err:
4182	dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
4183		err);
4184done:
4185	clear_bit(ICE_CFG_BUSY, pf->state);
4186	return err;
4187}
4188
4189/**
4190 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4191 * @pf: PF to configure
4192 *
4193 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4194 * VSI can still Tx/Rx VLAN tagged packets.
4195 */
4196static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4197{
4198	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4199	struct ice_vsi_ctx *ctxt;
4200	struct ice_hw *hw;
4201	int status;
4202
4203	if (!vsi)
4204		return;
4205
4206	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4207	if (!ctxt)
4208		return;
4209
4210	hw = &pf->hw;
4211	ctxt->info = vsi->info;
4212
4213	ctxt->info.valid_sections =
4214		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4215			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4216			    ICE_AQ_VSI_PROP_SW_VALID);
4217
4218	/* disable VLAN anti-spoof */
4219	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4220				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4221
4222	/* disable VLAN pruning and keep all other settings */
4223	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4224
4225	/* allow all VLANs on Tx and don't strip on Rx */
4226	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4227		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4228
4229	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4230	if (status) {
4231		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4232			status, ice_aq_str(hw->adminq.sq_last_status));
4233	} else {
4234		vsi->info.sec_flags = ctxt->info.sec_flags;
4235		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4236		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4237	}
4238
4239	kfree(ctxt);
4240}
4241
4242/**
4243 * ice_log_pkg_init - log result of DDP package load
4244 * @hw: pointer to hardware info
4245 * @state: state of package load
4246 */
4247static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4248{
4249	struct ice_pf *pf = hw->back;
4250	struct device *dev;
4251
4252	dev = ice_pf_to_dev(pf);
4253
4254	switch (state) {
4255	case ICE_DDP_PKG_SUCCESS:
4256		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4257			 hw->active_pkg_name,
4258			 hw->active_pkg_ver.major,
4259			 hw->active_pkg_ver.minor,
4260			 hw->active_pkg_ver.update,
4261			 hw->active_pkg_ver.draft);
4262		break;
4263	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4264		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4265			 hw->active_pkg_name,
4266			 hw->active_pkg_ver.major,
4267			 hw->active_pkg_ver.minor,
4268			 hw->active_pkg_ver.update,
4269			 hw->active_pkg_ver.draft);
4270		break;
4271	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4272		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4273			hw->active_pkg_name,
4274			hw->active_pkg_ver.major,
4275			hw->active_pkg_ver.minor,
4276			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4277		break;
4278	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4279		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4280			 hw->active_pkg_name,
4281			 hw->active_pkg_ver.major,
4282			 hw->active_pkg_ver.minor,
4283			 hw->active_pkg_ver.update,
4284			 hw->active_pkg_ver.draft,
4285			 hw->pkg_name,
4286			 hw->pkg_ver.major,
4287			 hw->pkg_ver.minor,
4288			 hw->pkg_ver.update,
4289			 hw->pkg_ver.draft);
4290		break;
4291	case ICE_DDP_PKG_FW_MISMATCH:
4292		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4293		break;
4294	case ICE_DDP_PKG_INVALID_FILE:
4295		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4296		break;
4297	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4298		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4299		break;
4300	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4301		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4302			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4303		break;
4304	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4305		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4306		break;
4307	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4308		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4309		break;
4310	case ICE_DDP_PKG_LOAD_ERROR:
4311		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4312		/* poll for reset to complete */
4313		if (ice_check_reset(hw))
4314			dev_err(dev, "Error resetting device. Please reload the driver\n");
4315		break;
4316	case ICE_DDP_PKG_ERR:
4317	default:
4318		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4319		break;
4320	}
4321}
4322
4323/**
4324 * ice_load_pkg - load/reload the DDP Package file
4325 * @firmware: firmware structure when firmware requested or NULL for reload
4326 * @pf: pointer to the PF instance
4327 *
4328 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4329 * initialize HW tables.
4330 */
4331static void
4332ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4333{
4334	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4335	struct device *dev = ice_pf_to_dev(pf);
4336	struct ice_hw *hw = &pf->hw;
4337
4338	/* Load DDP Package */
4339	if (firmware && !hw->pkg_copy) {
4340		state = ice_copy_and_init_pkg(hw, firmware->data,
4341					      firmware->size);
4342		ice_log_pkg_init(hw, state);
4343	} else if (!firmware && hw->pkg_copy) {
4344		/* Reload package during rebuild after CORER/GLOBR reset */
4345		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4346		ice_log_pkg_init(hw, state);
4347	} else {
4348		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4349	}
4350
4351	if (!ice_is_init_pkg_successful(state)) {
4352		/* Safe Mode */
4353		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4354		return;
4355	}
4356
4357	/* Successful download package is the precondition for advanced
4358	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4359	 */
4360	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4361}
4362
4363/**
4364 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4365 * @pf: pointer to the PF structure
4366 *
4367 * There is no error returned here because the driver should be able to handle
4368 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4369 * specifically with Tx.
4370 */
4371static void ice_verify_cacheline_size(struct ice_pf *pf)
4372{
4373	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4374		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4375			 ICE_CACHE_LINE_BYTES);
4376}
4377
4378/**
4379 * ice_send_version - update firmware with driver version
4380 * @pf: PF struct
4381 *
4382 * Returns 0 on success, else error code
4383 */
4384static int ice_send_version(struct ice_pf *pf)
4385{
4386	struct ice_driver_ver dv;
4387
4388	dv.major_ver = 0xff;
4389	dv.minor_ver = 0xff;
4390	dv.build_ver = 0xff;
4391	dv.subbuild_ver = 0;
4392	strscpy((char *)dv.driver_string, UTS_RELEASE,
4393		sizeof(dv.driver_string));
4394	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4395}
4396
4397/**
4398 * ice_init_fdir - Initialize flow director VSI and configuration
4399 * @pf: pointer to the PF instance
4400 *
4401 * returns 0 on success, negative on error
4402 */
4403static int ice_init_fdir(struct ice_pf *pf)
4404{
4405	struct device *dev = ice_pf_to_dev(pf);
4406	struct ice_vsi *ctrl_vsi;
4407	int err;
4408
4409	/* Side Band Flow Director needs to have a control VSI.
4410	 * Allocate it and store it in the PF.
4411	 */
4412	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4413	if (!ctrl_vsi) {
4414		dev_dbg(dev, "could not create control VSI\n");
4415		return -ENOMEM;
4416	}
4417
4418	err = ice_vsi_open_ctrl(ctrl_vsi);
4419	if (err) {
4420		dev_dbg(dev, "could not open control VSI\n");
4421		goto err_vsi_open;
4422	}
4423
4424	mutex_init(&pf->hw.fdir_fltr_lock);
4425
4426	err = ice_fdir_create_dflt_rules(pf);
4427	if (err)
4428		goto err_fdir_rule;
4429
4430	return 0;
4431
4432err_fdir_rule:
4433	ice_fdir_release_flows(&pf->hw);
4434	ice_vsi_close(ctrl_vsi);
4435err_vsi_open:
4436	ice_vsi_release(ctrl_vsi);
4437	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4438		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4439		pf->ctrl_vsi_idx = ICE_NO_VSI;
4440	}
4441	return err;
4442}
4443
4444static void ice_deinit_fdir(struct ice_pf *pf)
4445{
4446	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4447
4448	if (!vsi)
4449		return;
4450
4451	ice_vsi_manage_fdir(vsi, false);
4452	ice_vsi_release(vsi);
4453	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4454		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4455		pf->ctrl_vsi_idx = ICE_NO_VSI;
4456	}
4457
4458	mutex_destroy(&pf->hw.fdir_fltr_lock);
4459}
4460
4461/**
4462 * ice_get_opt_fw_name - return optional firmware file name or NULL
4463 * @pf: pointer to the PF instance
4464 */
4465static char *ice_get_opt_fw_name(struct ice_pf *pf)
4466{
4467	/* Optional firmware name is the same as the default, with an additional
4468	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4469	 */
4470	struct pci_dev *pdev = pf->pdev;
4471	char *opt_fw_filename;
4472	u64 dsn;
4473
4474	/* Determine the name of the optional file using the DSN (two
4475	 * dwords following the start of the DSN Capability).
4476	 */
4477	dsn = pci_get_dsn(pdev);
4478	if (!dsn)
4479		return NULL;
4480
4481	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4482	if (!opt_fw_filename)
4483		return NULL;
4484
4485	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4486		 ICE_DDP_PKG_PATH, dsn);
4487
4488	return opt_fw_filename;
4489}
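
/* Worked example (editorial addition): for a hypothetical DSN of
 * 0x0123456789abcdef, the snprintf() above produces
 * "intel/ice/ddp/ice-0123456789abcdef.pkg", which request_firmware()
 * then resolves under the firmware search paths (e.g. /lib/firmware/).
 */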
4490
4491/**
4492 * ice_request_fw - request the DDP package file
4493 * @pf: pointer to the PF instance
4494 * @firmware: double pointer to firmware struct
4495 *
4496 * Return: zero when successful, negative values otherwise.
4497 */
4498static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
4499{
4500	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4501	struct device *dev = ice_pf_to_dev(pf);
4502	int err = 0;
4503
4504	/* An optional device-specific DDP (if present) overrides the default DDP
4505	 * package file. The kernel logs a debug message if the file doesn't exist,
4506	 * and warning messages for other errors.
4507	 */
4508	if (opt_fw_filename) {
4509		err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
4510		kfree(opt_fw_filename);
4511		if (!err)
4512			return err;
4513	}
4514	err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
4515	if (err)
4516		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4517
4518	return err;
4519}
4520
4521/**
4522 * ice_init_tx_topology - performs Tx topology initialization
4523 * @hw: pointer to the hardware structure
4524 * @firmware: pointer to firmware structure
4525 *
4526 * Return: zero when init was successful, negative values otherwise.
4527 */
4528static int
4529ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
4530{
4531	u8 num_tx_sched_layers = hw->num_tx_sched_layers;
4532	struct ice_pf *pf = hw->back;
4533	struct device *dev;
4534	int err;
4535
4536	dev = ice_pf_to_dev(pf);
4537	err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
4538	if (!err) {
4539		if (hw->num_tx_sched_layers > num_tx_sched_layers)
4540			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
4541		else
4542			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
4543		/* if there was a change in topology, ice_cfg_tx_topo triggered
4544		 * a CORER and we need to re-init hw
4545		 */
4546		ice_deinit_hw(hw);
4547		err = ice_init_hw(hw);
4548
4549		return err;
4550	} else if (err == -EIO) {
4551		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
4552	}
4553
4554	return 0;
4555}
4556
4557/**
4558 * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
4559 * @hw: pointer to the hardware structure
4560 * @pf: pointer to pf structure
4561 *
4562 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4563 * formats the PF hardware supports. The exact list of supported RXDIDs
4564 * depends on the loaded DDP package. The IDs can be determined by reading the
4565 * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
4566 *
4567 * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
4568 * in the DDP package. The 16-byte legacy descriptor is never supported by
4569 * VFs.
4570 */
4571static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
4572{
4573	pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
4574
4575	for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
4576		u32 regval;
4577
4578		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
4579		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
4580			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
4581			pf->supported_rxdids |= BIT(i);
4582	}
4583}
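
/* Illustrative sketch (editorial addition, not upstream driver code): with
 * the bitmap built above, validating a descriptor format requested by a VF
 * reduces to a range check plus a bit test; rxdid is a hypothetical
 * requested ID:
 */
static __maybe_unused bool example_rxdid_supported(struct ice_pf *pf, u32 rxdid)
{
	return rxdid < ICE_FLEX_DESC_RXDID_MAX_NUM &&
	       (pf->supported_rxdids & BIT(rxdid));
}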
4584
4585/**
4586 * ice_init_ddp_config - DDP related configuration
4587 * @hw: pointer to the hardware structure
4588 * @pf: pointer to pf structure
4589 *
4590 * This function loads DDP file from the disk, then initializes Tx
4591 * topology. At the end DDP package is loaded on the card.
4592 *
4593 * Return: zero when init was successful, negative values otherwise.
4594 */
4595static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
4596{
4597	struct device *dev = ice_pf_to_dev(pf);
4598	const struct firmware *firmware = NULL;
4599	int err;
4600
4601	err = ice_request_fw(pf, &firmware);
4602	if (err) {
4603		dev_err(dev, "Failed to request FW: %d\n", err);
4604		return err;
4605	}
4606
4607	err = ice_init_tx_topology(hw, firmware);
4608	if (err) {
4609		dev_err(dev, "Failed to initialize Tx topology: %d\n",
4610			err);
4611		release_firmware(firmware);
4612		return err;
4613	}
4614
4615	/* Download firmware to device */
4616	ice_load_pkg(firmware, pf);
4617	release_firmware(firmware);
4618
4619	/* Initialize the supported Rx descriptor IDs after loading DDP */
4620	ice_init_supported_rxdids(hw, pf);
4621
4622	return 0;
4623}
4624
4625/**
4626 * ice_print_wake_reason - show the wake up cause in the log
4627 * @pf: pointer to the PF struct
4628 */
4629static void ice_print_wake_reason(struct ice_pf *pf)
4630{
4631	u32 wus = pf->wakeup_reason;
4632	const char *wake_str;
4633
4634	/* if no wake event, nothing to print */
4635	if (!wus)
4636		return;
4637
4638	if (wus & PFPM_WUS_LNKC_M)
4639		wake_str = "Link\n";
4640	else if (wus & PFPM_WUS_MAG_M)
4641		wake_str = "Magic Packet\n";
4642	else if (wus & PFPM_WUS_MNG_M)
4643		wake_str = "Management\n";
4644	else if (wus & PFPM_WUS_FW_RST_WK_M)
4645		wake_str = "Firmware Reset\n";
4646	else
4647		wake_str = "Unknown\n";
4648
4649	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4650}
4651
4652/**
4653 * ice_pf_fwlog_update_module - update 1 module
4654 * @pf: pointer to the PF struct
4655 * @log_level: log_level to use for the @module
4656 * @module: module to update
4657 */
4658void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
4659{
4660	struct ice_hw *hw = &pf->hw;
4661
4662	hw->fwlog_cfg.module_entries[module].log_level = log_level;
4663}
4664
4665/**
4666 * ice_register_netdev - register netdev
4667 * @vsi: pointer to the VSI struct
4668 */
4669static int ice_register_netdev(struct ice_vsi *vsi)
4670{
4671	int err;
4672
4673	if (!vsi || !vsi->netdev)
4674		return -EIO;
4675
4676	err = register_netdev(vsi->netdev);
4677	if (err)
4678		return err;
4679
4680	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4681	netif_carrier_off(vsi->netdev);
4682	netif_tx_stop_all_queues(vsi->netdev);
4683
4684	return 0;
4685}
4686
4687static void ice_unregister_netdev(struct ice_vsi *vsi)
4688{
4689	if (!vsi || !vsi->netdev)
4690		return;
4691
4692	unregister_netdev(vsi->netdev);
4693	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4694}
4695
4696/**
4697 * ice_cfg_netdev - Allocate and configure a netdev
4698 * @vsi: the VSI associated with the new netdev
4699 *
4700 * Returns 0 on success, negative value on failure
4701 */
4702static int ice_cfg_netdev(struct ice_vsi *vsi)
4703{
4704	struct ice_netdev_priv *np;
4705	struct net_device *netdev;
4706	u8 mac_addr[ETH_ALEN];
4707
4708	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4709				    vsi->alloc_rxq);
4710	if (!netdev)
4711		return -ENOMEM;
4712
4713	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4714	vsi->netdev = netdev;
4715	np = netdev_priv(netdev);
4716	np->vsi = vsi;
4717
4718	ice_set_netdev_features(netdev);
4719	ice_set_ops(vsi);
4720
4721	if (vsi->type == ICE_VSI_PF) {
4722		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4723		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4724		eth_hw_addr_set(netdev, mac_addr);
4725	}
4726
4727	netdev->priv_flags |= IFF_UNICAST_FLT;
4728
4729	/* Setup netdev TC information */
4730	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4731
4732	netdev->max_mtu = ICE_MAX_MTU;
4733
4734	return 0;
4735}
4736
4737static void ice_decfg_netdev(struct ice_vsi *vsi)
4738{
4739	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4740	free_netdev(vsi->netdev);
4741	vsi->netdev = NULL;
4742}
4743
4744/**
4745 * ice_wait_for_fw - wait for full FW readiness
4746 * @hw: pointer to the hardware structure
4747 * @timeout: milliseconds that can elapse before timing out
4748 */
4749static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4750{
4751	int fw_loading;
4752	u32 elapsed = 0;
4753
4754	while (elapsed <= timeout) {
4755		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4756
4757		/* firmware was not yet loaded, we have to wait more */
4758		if (fw_loading) {
4759			elapsed += 100;
4760			msleep(100);
4761			continue;
4762		}
4763		return 0;
4764	}
4765
4766	return -ETIMEDOUT;
4767}
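
/* Illustrative sketch (editorial addition, kept as a comment because it
 * assumes <linux/iopoll.h> is included): the open-coded poll above could
 * also be written with read_poll_timeout(), keeping the 100 ms step:
 *
 *	u32 fwsm;
 *
 *	return read_poll_timeout(rd32, fwsm,
 *				 !(fwsm & GL_MNG_FWSM_FW_LOADING_M),
 *				 100 * USEC_PER_MSEC, timeout * USEC_PER_MSEC,
 *				 false, hw, GL_MNG_FWSM);
 */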
4768
4769int ice_init_dev(struct ice_pf *pf)
4770{
4771	struct device *dev = ice_pf_to_dev(pf);
4772	struct ice_hw *hw = &pf->hw;
4773	int err;
4774
4775	err = ice_init_hw(hw);
4776	if (err) {
4777		dev_err(dev, "ice_init_hw failed: %d\n", err);
4778		return err;
4779	}
4780
4781	/* Some cards require longer initialization times
4782	 * due to the necessity of loading FW from an external source.
4783	 * This can take up to half a minute.
4784	 */
4785	if (ice_is_pf_c827(hw)) {
4786		err = ice_wait_for_fw(hw, 30000);
4787		if (err) {
4788			dev_err(dev, "ice_wait_for_fw timed out\n");
4789			return err;
4790		}
4791	}
4792
4793	ice_init_feature_support(pf);
4794
4795	err = ice_init_ddp_config(hw, pf);
4796
4797	/* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
4798	 * set in pf->flags, which will cause ice_is_safe_mode to return
4799	 * true
4800	 */
4801	if (err || ice_is_safe_mode(pf)) {
4802		/* we already got function/device capabilities but these don't
4803		 * reflect what the driver needs to do in safe mode. Instead of
4804		 * adding conditional logic everywhere to ignore these
4805		 * device/function capabilities, override them.
4806		 */
4807		ice_set_safe_mode_caps(hw);
4808	}
4809
4810	err = ice_init_pf(pf);
4811	if (err) {
4812		dev_err(dev, "ice_init_pf failed: %d\n", err);
4813		goto err_init_pf;
4814	}
4815
4816	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4817	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4818	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4819	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4820	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4821		pf->hw.udp_tunnel_nic.tables[0].n_entries =
4822			pf->hw.tnl.valid_count[TNL_VXLAN];
4823		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4824			UDP_TUNNEL_TYPE_VXLAN;
4825	}
4826	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4827		pf->hw.udp_tunnel_nic.tables[1].n_entries =
4828			pf->hw.tnl.valid_count[TNL_GENEVE];
4829		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4830			UDP_TUNNEL_TYPE_GENEVE;
4831	}
4832
4833	err = ice_init_interrupt_scheme(pf);
4834	if (err) {
4835		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4836		err = -EIO;
4837		goto err_init_interrupt_scheme;
4838	}
4839
4840	/* In case of MSIX we are going to setup the misc vector right here
4841	 * to handle admin queue events etc. In case of legacy and MSI
4842	 * the misc functionality and queue processing is combined in
4843	 * the same vector and that gets setup at open.
4844	 */
4845	err = ice_req_irq_msix_misc(pf);
4846	if (err) {
4847		dev_err(dev, "setup of misc vector failed: %d\n", err);
4848		goto err_req_irq_msix_misc;
4849	}
4850
4851	return 0;
4852
4853err_req_irq_msix_misc:
4854	ice_clear_interrupt_scheme(pf);
4855err_init_interrupt_scheme:
4856	ice_deinit_pf(pf);
4857err_init_pf:
4858	ice_deinit_hw(hw);
4859	return err;
4860}
4861
4862void ice_deinit_dev(struct ice_pf *pf)
4863{
4864	ice_free_irq_msix_misc(pf);
4865	ice_deinit_pf(pf);
4866	ice_deinit_hw(&pf->hw);
4867
4868	/* Service task is already stopped, so call reset directly. */
4869	ice_reset(&pf->hw, ICE_RESET_PFR);
4870	pci_wait_for_pending_transaction(pf->pdev);
4871	ice_clear_interrupt_scheme(pf);
4872}
4873
4874static void ice_init_features(struct ice_pf *pf)
4875{
4876	struct device *dev = ice_pf_to_dev(pf);
4877
4878	if (ice_is_safe_mode(pf))
4879		return;
4880
4881	/* initialize DDP driven features */
4882	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4883		ice_ptp_init(pf);
4884
4885	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4886		ice_gnss_init(pf);
4887
4888	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4889	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4890		ice_dpll_init(pf);
4891
4892	/* Note: Flow director init failure is non-fatal to load */
4893	if (ice_init_fdir(pf))
4894		dev_err(dev, "could not initialize flow director\n");
4895
4896	/* Note: DCB init failure is non-fatal to load */
4897	if (ice_init_pf_dcb(pf, false)) {
4898		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4899		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4900	} else {
4901		ice_cfg_lldp_mib_change(&pf->hw, true);
4902	}
4903
4904	if (ice_init_lag(pf))
4905		dev_warn(dev, "Failed to init link aggregation support\n");
4906
4907	ice_hwmon_init(pf);
4908}
4909
4910static void ice_deinit_features(struct ice_pf *pf)
4911{
4912	if (ice_is_safe_mode(pf))
4913		return;
4914
4915	ice_deinit_lag(pf);
4916	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4917		ice_cfg_lldp_mib_change(&pf->hw, false);
4918	ice_deinit_fdir(pf);
4919	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4920		ice_gnss_exit(pf);
4921	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4922		ice_ptp_release(pf);
4923	if (test_bit(ICE_FLAG_DPLL, pf->flags))
4924		ice_dpll_deinit(pf);
4925	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4926		xa_destroy(&pf->eswitch.reprs);
4927}
4928
4929static void ice_init_wakeup(struct ice_pf *pf)
4930{
4931	/* Save wakeup reason register for later use */
4932	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4933
4934	/* check for a power management event */
4935	ice_print_wake_reason(pf);
4936
4937	/* clear wake status, all bits */
4938	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4939
4940	/* Disable WoL at init, wait for user to enable */
4941	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4942}
4943
4944static int ice_init_link(struct ice_pf *pf)
4945{
4946	struct device *dev = ice_pf_to_dev(pf);
4947	int err;
4948
4949	err = ice_init_link_events(pf->hw.port_info);
4950	if (err) {
4951		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4952		return err;
4953	}
4954
4955	/* not a fatal error if this fails */
4956	err = ice_init_nvm_phy_type(pf->hw.port_info);
4957	if (err)
4958		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4959
4960	/* not a fatal error if this fails */
4961	err = ice_update_link_info(pf->hw.port_info);
4962	if (err)
4963		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4964
4965	ice_init_link_dflt_override(pf->hw.port_info);
4966
4967	ice_check_link_cfg_err(pf,
4968			       pf->hw.port_info->phy.link_info.link_cfg_err);
4969
4970	/* if media available, initialize PHY settings */
4971	if (pf->hw.port_info->phy.link_info.link_info &
4972	    ICE_AQ_MEDIA_AVAILABLE) {
4973		/* not a fatal error if this fails */
4974		err = ice_init_phy_user_cfg(pf->hw.port_info);
4975		if (err)
4976			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4977
4978		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4979			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4980
4981			if (vsi)
4982				ice_configure_phy(vsi);
4983		}
4984	} else {
4985		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4986	}
4987
4988	return err;
4989}
4990
4991static int ice_init_pf_sw(struct ice_pf *pf)
4992{
4993	bool dvm = ice_is_dvm_ena(&pf->hw);
4994	struct ice_vsi *vsi;
4995	int err;
4996
4997	/* create switch struct for the switch element created by FW on boot */
4998	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4999	if (!pf->first_sw)
5000		return -ENOMEM;
5001
5002	if (pf->hw.evb_veb)
5003		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
5004	else
5005		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
5006
5007	pf->first_sw->pf = pf;
5008
5009	/* record the sw_id available for later use */
5010	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
5011
5012	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
5013	if (err)
5014		goto err_aq_set_port_params;
5015
5016	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
5017	if (!vsi) {
5018		err = -ENOMEM;
5019		goto err_pf_vsi_setup;
5020	}
5021
5022	return 0;
5023
5024err_pf_vsi_setup:
5025err_aq_set_port_params:
5026	kfree(pf->first_sw);
5027	return err;
5028}
5029
5030static void ice_deinit_pf_sw(struct ice_pf *pf)
5031{
5032	struct ice_vsi *vsi = ice_get_main_vsi(pf);
5033
5034	if (!vsi)
5035		return;
5036
5037	ice_vsi_release(vsi);
5038	kfree(pf->first_sw);
5039}
5040
5041static int ice_alloc_vsis(struct ice_pf *pf)
5042{
5043	struct device *dev = ice_pf_to_dev(pf);
5044
5045	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
5046	if (!pf->num_alloc_vsi)
5047		return -EIO;
5048
5049	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
5050		dev_warn(dev,
5051			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
5052			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
5053		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
5054	}
5055
5056	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
5057			       GFP_KERNEL);
5058	if (!pf->vsi)
5059		return -ENOMEM;
5060
5061	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
5062				     sizeof(*pf->vsi_stats), GFP_KERNEL);
5063	if (!pf->vsi_stats) {
5064		devm_kfree(dev, pf->vsi);
5065		return -ENOMEM;
5066	}
5067
5068	return 0;
5069}
5070
5071static void ice_dealloc_vsis(struct ice_pf *pf)
5072{
5073	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
5074	pf->vsi_stats = NULL;
5075
5076	pf->num_alloc_vsi = 0;
5077	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
5078	pf->vsi = NULL;
5079}
5080
5081static int ice_init_devlink(struct ice_pf *pf)
5082{
5083	int err;
5084
5085	err = ice_devlink_register_params(pf);
5086	if (err)
5087		return err;
5088
5089	ice_devlink_init_regions(pf);
5090	ice_devlink_register(pf);
5091
5092	return 0;
5093}
5094
5095static void ice_deinit_devlink(struct ice_pf *pf)
5096{
5097	ice_devlink_unregister(pf);
5098	ice_devlink_destroy_regions(pf);
5099	ice_devlink_unregister_params(pf);
5100}
5101
5102static int ice_init(struct ice_pf *pf)
5103{
5104	int err;
5105
5106	err = ice_init_dev(pf);
5107	if (err)
5108		return err;
5109
5110	err = ice_alloc_vsis(pf);
5111	if (err)
5112		goto err_alloc_vsis;
5113
5114	err = ice_init_pf_sw(pf);
5115	if (err)
5116		goto err_init_pf_sw;
5117
5118	ice_init_wakeup(pf);
5119
5120	err = ice_init_link(pf);
5121	if (err)
5122		goto err_init_link;
5123
5124	err = ice_send_version(pf);
5125	if (err)
5126		goto err_init_link;
5127
5128	ice_verify_cacheline_size(pf);
5129
5130	if (ice_is_safe_mode(pf))
5131		ice_set_safe_mode_vlan_cfg(pf);
5132	else
5133		/* print PCI link speed and width */
5134		pcie_print_link_status(pf->pdev);
5135
5136	/* ready to go, so clear down state bit */
5137	clear_bit(ICE_DOWN, pf->state);
5138	clear_bit(ICE_SERVICE_DIS, pf->state);
5139
5140	/* since everything is good, start the service timer */
5141	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5142
5143	return 0;
5144
5145err_init_link:
5146	ice_deinit_pf_sw(pf);
5147err_init_pf_sw:
5148	ice_dealloc_vsis(pf);
5149err_alloc_vsis:
5150	ice_deinit_dev(pf);
5151	return err;
5152}
5153
5154static void ice_deinit(struct ice_pf *pf)
5155{
5156	set_bit(ICE_SERVICE_DIS, pf->state);
5157	set_bit(ICE_DOWN, pf->state);
5158
5159	ice_deinit_pf_sw(pf);
5160	ice_dealloc_vsis(pf);
5161	ice_deinit_dev(pf);
5162}
5163
5164/**
5165 * ice_load - load PF by initializing HW and starting VSI
5166 * @pf: pointer to the pf instance
5167 *
5168 * This function has to be called under devl_lock.
5169 */
5170int ice_load(struct ice_pf *pf)
5171{
5172	struct ice_vsi *vsi;
5173	int err;
5174
5175	devl_assert_locked(priv_to_devlink(pf));
5176
5177	vsi = ice_get_main_vsi(pf);
5178
5179	/* init channel list */
5180	INIT_LIST_HEAD(&vsi->ch_list);
5181
5182	err = ice_cfg_netdev(vsi);
5183	if (err)
5184		return err;
5185
5186	/* Setup DCB netlink interface */
5187	ice_dcbnl_setup(vsi);
5188
5189	err = ice_init_mac_fltr(pf);
5190	if (err)
5191		goto err_init_mac_fltr;
5192
5193	err = ice_devlink_create_pf_port(pf);
5194	if (err)
5195		goto err_devlink_create_pf_port;
5196
5197	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5198
5199	err = ice_register_netdev(vsi);
5200	if (err)
5201		goto err_register_netdev;
5202
5203	err = ice_tc_indir_block_register(vsi);
5204	if (err)
5205		goto err_tc_indir_block_register;
5206
5207	ice_napi_add(vsi);
5208
5209	err = ice_init_rdma(pf);
5210	if (err)
5211		goto err_init_rdma;
5212
5213	ice_init_features(pf);
5214	ice_service_task_restart(pf);
5215
5216	clear_bit(ICE_DOWN, pf->state);
5217
5218	return 0;
5219
5220err_init_rdma:
5221	ice_tc_indir_block_unregister(vsi);
5222err_tc_indir_block_register:
5223	ice_unregister_netdev(vsi);
5224err_register_netdev:
5225	ice_devlink_destroy_pf_port(pf);
5226err_devlink_create_pf_port:
5227err_init_mac_fltr:
5228	ice_decfg_netdev(vsi);
5229	return err;
5230}
5231
5232/**
5233 * ice_unload - unload PF by stopping VSI and deinitializing HW
5234 * @pf: pointer to the pf instance
5235 *
5236 * This function has to be called under devl_lock.
5237 */
5238void ice_unload(struct ice_pf *pf)
5239{
5240	struct ice_vsi *vsi = ice_get_main_vsi(pf);
5241
5242	devl_assert_locked(priv_to_devlink(pf));
5243
5244	ice_deinit_features(pf);
5245	ice_deinit_rdma(pf);
5246	ice_tc_indir_block_unregister(vsi);
5247	ice_unregister_netdev(vsi);
5248	ice_devlink_destroy_pf_port(pf);
5249	ice_decfg_netdev(vsi);
5250}
5251
5252/**
5253 * ice_probe - Device initialization routine
5254 * @pdev: PCI device information struct
5255 * @ent: entry in ice_pci_tbl
5256 *
5257 * Returns 0 on success, negative on failure
5258 */
5259static int
5260ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5261{
5262	struct device *dev = &pdev->dev;
5263	struct ice_adapter *adapter;
5264	struct ice_pf *pf;
5265	struct ice_hw *hw;
5266	int err;
5267
5268	if (pdev->is_virtfn) {
5269		dev_err(dev, "can't probe a virtual function\n");
5270		return -EINVAL;
5271	}
5272
5273	/* when under a kdump kernel initiate a reset before enabling the
5274	 * device in order to clear out any pending DMA transactions. These
5275	 * transactions can cause some systems to machine check when doing
5276	 * the pcim_enable_device() below.
5277	 */
5278	if (is_kdump_kernel()) {
5279		pci_save_state(pdev);
5280		pci_clear_master(pdev);
5281		err = pcie_flr(pdev);
5282		if (err)
5283			return err;
5284		pci_restore_state(pdev);
5285	}
5286
5287	/* this driver uses devres, see
5288	 * Documentation/driver-api/driver-model/devres.rst
5289	 */
5290	err = pcim_enable_device(pdev);
5291	if (err)
5292		return err;
5293
5294	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5295	if (err) {
5296		dev_err(dev, "BAR0 I/O map error %d\n", err);
5297		return err;
5298	}
5299
5300	pf = ice_allocate_pf(dev);
5301	if (!pf)
5302		return -ENOMEM;
5303
5304	/* initialize Auxiliary index to invalid value */
5305	pf->aux_idx = -1;
5306
5307	/* set up for high or low DMA */
5308	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5309	if (err) {
5310		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5311		return err;
5312	}
5313
5314	pci_set_master(pdev);
5315
5316	adapter = ice_adapter_get(pdev);
5317	if (IS_ERR(adapter))
5318		return PTR_ERR(adapter);
5319
5320	pf->pdev = pdev;
5321	pf->adapter = adapter;
5322	pci_set_drvdata(pdev, pf);
5323	set_bit(ICE_DOWN, pf->state);
5324	/* Disable service task until DOWN bit is cleared */
5325	set_bit(ICE_SERVICE_DIS, pf->state);
5326
5327	hw = &pf->hw;
5328	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5329	pci_save_state(pdev);
5330
5331	hw->back = pf;
5332	hw->port_info = NULL;
5333	hw->vendor_id = pdev->vendor;
5334	hw->device_id = pdev->device;
5335	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5336	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5337	hw->subsystem_device_id = pdev->subsystem_device;
5338	hw->bus.device = PCI_SLOT(pdev->devfn);
5339	hw->bus.func = PCI_FUNC(pdev->devfn);
5340	ice_set_ctrlq_len(hw);
5341
5342	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5343
5344#ifndef CONFIG_DYNAMIC_DEBUG
5345	if (debug < -1)
5346		hw->debug_mask = debug;
5347#endif
5348
5349	err = ice_init(pf);
5350	if (err)
5351		goto err_init;
5352
5353	devl_lock(priv_to_devlink(pf));
5354	err = ice_load(pf);
5355	if (err)
5356		goto err_load;
5357
5358	err = ice_init_devlink(pf);
5359	if (err)
5360		goto err_init_devlink;
5361	devl_unlock(priv_to_devlink(pf));
5362
5363	return 0;
5364
5365err_init_devlink:
5366	ice_unload(pf);
5367err_load:
5368	devl_unlock(priv_to_devlink(pf));
5369	ice_deinit(pf);
5370err_init:
5371	ice_adapter_put(pdev);
5372	return err;
5373}
5374
5375/**
5376 * ice_set_wake - enable or disable Wake on LAN
5377 * @pf: pointer to the PF struct
5378 *
5379 * Simple helper for WoL control
5380 */
5381static void ice_set_wake(struct ice_pf *pf)
5382{
5383	struct ice_hw *hw = &pf->hw;
5384	bool wol = pf->wol_ena;
5385
5386	/* clear wake state, otherwise new wake events won't fire */
5387	wr32(hw, PFPM_WUS, U32_MAX);
5388
5389	/* enable / disable APM wake up, no RMW needed */
5390	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5391
5392	/* set magic packet filter enabled */
5393	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5394}
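/* Illustrative usage (user space; "eth0" is a placeholder name): pf->wol_ena
 * is driven by the standard ethtool Wake-on-LAN control, e.g.
 *
 *   ethtool -s eth0 wol g
 *
 * requests magic-packet wake, so the PFPM_APM/PFPM_WUFC writes above arm the
 * hardware at shutdown/suspend; "wol d" disables wake again.
 */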
5395
5396/**
5397 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5398 * @pf: pointer to the PF struct
5399 *
5400 * Issue firmware command to enable multicast magic wake, making
5401 * sure that any locally administered address (LAA) is used for
5402 * wake, and that PF reset doesn't undo the LAA.
5403 */
5404static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5405{
5406	struct device *dev = ice_pf_to_dev(pf);
5407	struct ice_hw *hw = &pf->hw;
5408	u8 mac_addr[ETH_ALEN];
5409	struct ice_vsi *vsi;
5410	int status;
5411	u8 flags;
5412
5413	if (!pf->wol_ena)
5414		return;
5415
5416	vsi = ice_get_main_vsi(pf);
5417	if (!vsi)
5418		return;
5419
5420	/* Get current MAC address in case it's an LAA */
5421	if (vsi->netdev)
5422		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5423	else
5424		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5425
5426	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5427		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5428		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5429
5430	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5431	if (status)
5432		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5433			status, ice_aq_str(hw->adminq.sq_last_status));
5434}
5435
5436/**
5437 * ice_remove - Device removal routine
5438 * @pdev: PCI device information struct
5439 */
5440static void ice_remove(struct pci_dev *pdev)
5441{
5442	struct ice_pf *pf = pci_get_drvdata(pdev);
5443	int i;
5444
5445	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5446		if (!ice_is_reset_in_progress(pf->state))
5447			break;
5448		msleep(100);
5449	}
5450
5451	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5452		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5453		ice_free_vfs(pf);
5454	}
5455
5456	ice_hwmon_exit(pf);
5457
5458	ice_service_task_stop(pf);
5459	ice_aq_cancel_waiting_tasks(pf);
5460	set_bit(ICE_DOWN, pf->state);
5461
5462	if (!ice_is_safe_mode(pf))
5463		ice_remove_arfs(pf);
5464
5465	devl_lock(priv_to_devlink(pf));
5466	ice_dealloc_all_dynamic_ports(pf);
5467	ice_deinit_devlink(pf);
5468
5469	ice_unload(pf);
5470	devl_unlock(priv_to_devlink(pf));
5471
5472	ice_deinit(pf);
5473	ice_vsi_release_all(pf);
5474
5475	ice_setup_mc_magic_wake(pf);
5476	ice_set_wake(pf);
5477
5478	ice_adapter_put(pdev);
5479}
5480
5481/**
5482 * ice_shutdown - PCI callback for shutting down device
5483 * @pdev: PCI device information struct
5484 */
5485static void ice_shutdown(struct pci_dev *pdev)
5486{
5487	struct ice_pf *pf = pci_get_drvdata(pdev);
5488
5489	ice_remove(pdev);
5490
5491	if (system_state == SYSTEM_POWER_OFF) {
5492		pci_wake_from_d3(pdev, pf->wol_ena);
5493		pci_set_power_state(pdev, PCI_D3hot);
5494	}
5495}
5496
5497/**
5498 * ice_prepare_for_shutdown - prep for PCI shutdown
5499 * @pf: board private structure
5500 *
5501 * Inform or close all dependent features in prep for PCI device shutdown
5502 */
5503static void ice_prepare_for_shutdown(struct ice_pf *pf)
5504{
5505	struct ice_hw *hw = &pf->hw;
5506	u32 v;
5507
5508	/* Notify VFs of impending reset */
5509	if (ice_check_sq_alive(hw, &hw->mailboxq))
5510		ice_vc_notify_reset(pf);
5511
5512	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5513
5514	/* disable the VSIs and their queues that are not already DOWN */
5515	ice_pf_dis_all_vsi(pf, false);
5516
5517	ice_for_each_vsi(pf, v)
5518		if (pf->vsi[v])
5519			pf->vsi[v]->vsi_num = 0;
5520
5521	ice_shutdown_all_ctrlq(hw, true);
5522}
5523
5524/**
5525 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5526 * @pf: board private structure to reinitialize
5527 *
5528 * This routine reinitializes the interrupt scheme that was cleared during
5529 * the power management suspend callback.
5530 *
5531 * This should be called from the resume routine to re-allocate the q_vectors
5532 * and reacquire interrupts.
5533 */
5534static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5535{
5536	struct device *dev = ice_pf_to_dev(pf);
5537	int ret, v;
5538
5539	/* Since we clear MSIX flag during suspend, we need to
5540	 * set it back during resume...
5541	 */
5542
5543	ret = ice_init_interrupt_scheme(pf);
5544	if (ret) {
5545		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5546		return ret;
5547	}
5548
5549	/* Remap vectors and rings, after successful re-init interrupts */
5550	ice_for_each_vsi(pf, v) {
5551		if (!pf->vsi[v])
5552			continue;
5553
5554		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5555		if (ret)
5556			goto err_reinit;
5557		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5558		rtnl_lock();
5559		ice_vsi_set_napi_queues(pf->vsi[v]);
5560		rtnl_unlock();
5561	}
5562
5563	ret = ice_req_irq_msix_misc(pf);
5564	if (ret) {
5565		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5566			ret);
5567		goto err_reinit;
5568	}
5569
5570	return 0;
5571
5572err_reinit:
5573	while (v--)
5574		if (pf->vsi[v]) {
5575			rtnl_lock();
5576			ice_vsi_clear_napi_queues(pf->vsi[v]);
5577			rtnl_unlock();
5578			ice_vsi_free_q_vectors(pf->vsi[v]);
5579		}
5580
5581	return ret;
5582}
5583
5584/**
5585 * ice_suspend - PM callback to quiesce the device
5586 * @dev: generic device information structure
5587 *
5588 * Power Management callback to quiesce the device and prepare
5589 * for D3 transition.
5590 */
5591static int ice_suspend(struct device *dev)
5592{
5593	struct pci_dev *pdev = to_pci_dev(dev);
5594	struct ice_pf *pf;
5595	int disabled, v;
5596
5597	pf = pci_get_drvdata(pdev);
5598
5599	if (!ice_pf_state_is_nominal(pf)) {
5600		dev_err(dev, "Device is not ready, no need to suspend it\n");
5601		return -EBUSY;
5602	}
5603
5604	/* Stop watchdog tasks until resume completion.
5605	 * Even though it is most likely that the service task is
5606	 * disabled if the device is suspended or down, the service task's
5607	 * state is controlled by a different state bit, and we should
5608	 * store and honor whatever state that bit is in at this point.
5609	 */
5610	disabled = ice_service_task_stop(pf);
5611
5612	ice_deinit_rdma(pf);
5613
5614	/* Already suspended? Then there is nothing to do */
5615	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5616		if (!disabled)
5617			ice_service_task_restart(pf);
5618		return 0;
5619	}
5620
5621	if (test_bit(ICE_DOWN, pf->state) ||
5622	    ice_is_reset_in_progress(pf->state)) {
5623		dev_err(dev, "can't suspend device in reset or already down\n");
5624		if (!disabled)
5625			ice_service_task_restart(pf);
5626		return 0;
5627	}
5628
5629	ice_setup_mc_magic_wake(pf);
5630
5631	ice_prepare_for_shutdown(pf);
5632
5633	ice_set_wake(pf);
5634
5635	/* Free vectors, clear the interrupt scheme and release IRQs
5636	 * for proper hibernation, especially with large number of CPUs.
5637	 * Otherwise hibernation might fail when mapping all the vectors back
5638	 * to CPU0.
5639	 */
5640	ice_free_irq_msix_misc(pf);
5641	ice_for_each_vsi(pf, v) {
5642		if (!pf->vsi[v])
5643			continue;
5644		rtnl_lock();
5645		ice_vsi_clear_napi_queues(pf->vsi[v]);
5646		rtnl_unlock();
5647		ice_vsi_free_q_vectors(pf->vsi[v]);
5648	}
5649	ice_clear_interrupt_scheme(pf);
5650
5651	pci_save_state(pdev);
5652	pci_wake_from_d3(pdev, pf->wol_ena);
5653	pci_set_power_state(pdev, PCI_D3hot);
5654	return 0;
5655}
5656
5657/**
5658 * ice_resume - PM callback for waking up from D3
5659 * @dev: generic device information structure
5660 */
5661static int ice_resume(struct device *dev)
5662{
5663	struct pci_dev *pdev = to_pci_dev(dev);
5664	enum ice_reset_req reset_type;
5665	struct ice_pf *pf;
5666	struct ice_hw *hw;
5667	int ret;
5668
5669	pci_set_power_state(pdev, PCI_D0);
5670	pci_restore_state(pdev);
5671	pci_save_state(pdev);
5672
5673	if (!pci_device_is_present(pdev))
5674		return -ENODEV;
5675
5676	ret = pci_enable_device_mem(pdev);
5677	if (ret) {
5678		dev_err(dev, "Cannot enable device after suspend\n");
5679		return ret;
5680	}
5681
5682	pf = pci_get_drvdata(pdev);
5683	hw = &pf->hw;
5684
5685	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5686	ice_print_wake_reason(pf);
5687
5688	/* We cleared the interrupt scheme when we suspended, so we need to
5689	 * restore it now to resume device functionality.
5690	 */
5691	ret = ice_reinit_interrupt_scheme(pf);
5692	if (ret)
5693		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5694
5695	ret = ice_init_rdma(pf);
5696	if (ret)
5697		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5698			ret);
5699
5700	clear_bit(ICE_DOWN, pf->state);
5701	/* Now perform PF reset and rebuild */
5702	reset_type = ICE_RESET_PFR;
5703	/* re-enable service task for reset, but allow reset to schedule it */
5704	clear_bit(ICE_SERVICE_DIS, pf->state);
5705
5706	if (ice_schedule_reset(pf, reset_type))
5707		dev_err(dev, "Reset during resume failed.\n");
5708
5709	clear_bit(ICE_SUSPENDED, pf->state);
5710	ice_service_task_restart(pf);
5711
5712	/* Restart the service task */
5713	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5714
5715	return 0;
5716}
5717
5718/**
5719 * ice_pci_err_detected - warning that PCI error has been detected
5720 * @pdev: PCI device information struct
5721 * @err: the type of PCI error
5722 *
5723 * Called to warn that something happened on the PCI bus and the error handling
5724 * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5725 */
5726static pci_ers_result_t
5727ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5728{
5729	struct ice_pf *pf = pci_get_drvdata(pdev);
5730
5731	if (!pf) {
5732		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5733			__func__, err);
5734		return PCI_ERS_RESULT_DISCONNECT;
5735	}
5736
5737	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5738		ice_service_task_stop(pf);
5739
5740		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5741			set_bit(ICE_PFR_REQ, pf->state);
5742			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5743		}
5744	}
5745
5746	return PCI_ERS_RESULT_NEED_RESET;
5747}
5748
5749/**
5750 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5751 * @pdev: PCI device information struct
5752 *
5753 * Called after a PCI slot reset to determine if the driver can recover,
5754 * using a register read to check whether the device is responsive.
5755 */
5756static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5757{
5758	struct ice_pf *pf = pci_get_drvdata(pdev);
5759	pci_ers_result_t result;
5760	int err;
5761	u32 reg;
5762
5763	err = pci_enable_device_mem(pdev);
5764	if (err) {
5765		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5766			err);
5767		result = PCI_ERS_RESULT_DISCONNECT;
5768	} else {
5769		pci_set_master(pdev);
5770		pci_restore_state(pdev);
5771		pci_save_state(pdev);
5772		pci_wake_from_d3(pdev, false);
5773
5774		/* Check for life */
5775		reg = rd32(&pf->hw, GLGEN_RTRIG);
5776		if (!reg)
5777			result = PCI_ERS_RESULT_RECOVERED;
5778		else
5779			result = PCI_ERS_RESULT_DISCONNECT;
5780	}
5781
5782	return result;
5783}
5784
5785/**
5786 * ice_pci_err_resume - restart operations after PCI error recovery
5787 * @pdev: PCI device information struct
5788 *
5789 * Called to allow the driver to bring things back up after PCI error and/or
5790 * reset recovery have finished
5791 */
5792static void ice_pci_err_resume(struct pci_dev *pdev)
5793{
5794	struct ice_pf *pf = pci_get_drvdata(pdev);
5795
5796	if (!pf) {
5797		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5798			__func__);
5799		return;
5800	}
5801
5802	if (test_bit(ICE_SUSPENDED, pf->state)) {
5803		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5804			__func__);
5805		return;
5806	}
5807
5808	ice_restore_all_vfs_msi_state(pf);
5809
5810	ice_do_reset(pf, ICE_RESET_PFR);
5811	ice_service_task_restart(pf);
5812	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5813}
5814
5815/**
5816 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5817 * @pdev: PCI device information struct
5818 */
5819static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5820{
5821	struct ice_pf *pf = pci_get_drvdata(pdev);
5822
5823	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5824		ice_service_task_stop(pf);
5825
5826		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5827			set_bit(ICE_PFR_REQ, pf->state);
5828			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5829		}
5830	}
5831}
5832
5833/**
5834 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5835 * @pdev: PCI device information struct
5836 */
5837static void ice_pci_err_reset_done(struct pci_dev *pdev)
5838{
5839	ice_pci_err_resume(pdev);
5840}
5841
5842/* ice_pci_tbl - PCI Device ID Table
5843 *
5844 * Wildcard entries (PCI_ANY_ID) should come last
5845 * Last entry must be all 0s
5846 *
5847 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5848 *   Class, Class Mask, private data (not used) }
5849 */
5850static const struct pci_device_id ice_pci_tbl[] = {
5851	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5852	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5853	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5854	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5855	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5856	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5857	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5858	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5859	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5860	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5861	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5862	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5863	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5864	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5865	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5866	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5867	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5868	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5869	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5870	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5871	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5872	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5873	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5874	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5875	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5876	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5877	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5878	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5879	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5880	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5881	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5882	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5883	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5884	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5885	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
5886	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
5887	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
5888	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
5889	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
5890	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
5891	/* required last entry */
5892	{}
5893};
5894MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5895
5896static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5897
5898static const struct pci_error_handlers ice_pci_err_handler = {
5899	.error_detected = ice_pci_err_detected,
5900	.slot_reset = ice_pci_err_slot_reset,
5901	.reset_prepare = ice_pci_err_reset_prepare,
5902	.reset_done = ice_pci_err_reset_done,
5903	.resume = ice_pci_err_resume
5904};
5905
5906static struct pci_driver ice_driver = {
5907	.name = KBUILD_MODNAME,
5908	.id_table = ice_pci_tbl,
5909	.probe = ice_probe,
5910	.remove = ice_remove,
5911	.driver.pm = pm_sleep_ptr(&ice_pm_ops),
5912	.shutdown = ice_shutdown,
5913	.sriov_configure = ice_sriov_configure,
5914	.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5915	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5916	.err_handler = &ice_pci_err_handler
5917};
5918
5919/**
5920 * ice_module_init - Driver registration routine
5921 *
5922 * ice_module_init is the first routine called when the driver is
5923 * loaded. It sets up module resources and registers the PCI driver.
5924 */
5925static int __init ice_module_init(void)
5926{
5927	int status = -ENOMEM;
5928
5929	pr_info("%s\n", ice_driver_string);
5930	pr_info("%s\n", ice_copyright);
5931
5932	ice_adv_lnk_speed_maps_init();
5933
5934	ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
5935	if (!ice_wq) {
5936		pr_err("Failed to create workqueue\n");
5937		return status;
5938	}
5939
5940	ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5941	if (!ice_lag_wq) {
5942		pr_err("Failed to create LAG workqueue\n");
5943		goto err_dest_wq;
5944	}
5945
5946	ice_debugfs_init();
5947
5948	status = pci_register_driver(&ice_driver);
5949	if (status) {
5950		pr_err("failed to register PCI driver, err %d\n", status);
5951		goto err_dest_lag_wq;
5952	}
5953
5954	status = ice_sf_driver_register();
5955	if (status) {
5956		pr_err("Failed to register SF driver, err %d\n", status);
5957		goto err_sf_driver;
5958	}
5959
5960	return 0;
5961
5962err_sf_driver:
5963	pci_unregister_driver(&ice_driver);
5964err_dest_lag_wq:
5965	destroy_workqueue(ice_lag_wq);
5966	ice_debugfs_exit();
5967err_dest_wq:
5968	destroy_workqueue(ice_wq);
5969	return status;
5970}
5971module_init(ice_module_init);
5972
5973/**
5974 * ice_module_exit - Driver exit cleanup routine
5975 *
5976 * ice_module_exit is called just before the driver is removed
5977 * from memory.
5978 */
5979static void __exit ice_module_exit(void)
5980{
5981	ice_sf_driver_unregister();
5982	pci_unregister_driver(&ice_driver);
5983	ice_debugfs_exit();
5984	destroy_workqueue(ice_wq);
5985	destroy_workqueue(ice_lag_wq);
5986	pr_info("module unloaded\n");
5987}
5988module_exit(ice_module_exit);
5989
5990/**
5991 * ice_set_mac_address - NDO callback to set MAC address
5992 * @netdev: network interface device structure
5993 * @pi: pointer to an address structure
5994 *
5995 * Returns 0 on success, negative on failure
5996 */
5997static int ice_set_mac_address(struct net_device *netdev, void *pi)
5998{
5999	struct ice_netdev_priv *np = netdev_priv(netdev);
6000	struct ice_vsi *vsi = np->vsi;
6001	struct ice_pf *pf = vsi->back;
6002	struct ice_hw *hw = &pf->hw;
6003	struct sockaddr *addr = pi;
6004	u8 old_mac[ETH_ALEN];
6005	u8 flags = 0;
6006	u8 *mac;
6007	int err;
6008
6009	mac = (u8 *)addr->sa_data;
6010
6011	if (!is_valid_ether_addr(mac))
6012		return -EADDRNOTAVAIL;
6013
6014	if (test_bit(ICE_DOWN, pf->state) ||
6015	    ice_is_reset_in_progress(pf->state)) {
6016		netdev_err(netdev, "can't set mac %pM. device not ready\n",
6017			   mac);
6018		return -EBUSY;
6019	}
6020
6021	if (ice_chnl_dmac_fltr_cnt(pf)) {
6022		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
6023			   mac);
6024		return -EAGAIN;
6025	}
6026
6027	netif_addr_lock_bh(netdev);
6028	ether_addr_copy(old_mac, netdev->dev_addr);
6029	/* change the netdev's MAC address */
6030	eth_hw_addr_set(netdev, mac);
6031	netif_addr_unlock_bh(netdev);
6032
6033	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
6034	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
6035	if (err && err != -ENOENT) {
6036		err = -EADDRNOTAVAIL;
6037		goto err_update_filters;
6038	}
6039
6040	/* Add filter for new MAC. If filter exists, return success */
6041	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
6042	if (err == -EEXIST) {
6043		/* Although this MAC filter is already present in hardware it's
6044		 * possible in some cases (e.g. bonding) that dev_addr was
6045		 * modified outside of the driver and needs to be restored back
6046		 * to this value.
6047		 */
6048		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
6049
6050		return 0;
6051	} else if (err) {
6052		/* error if the new filter addition failed */
6053		err = -EADDRNOTAVAIL;
6054	}
6055
6056err_update_filters:
6057	if (err) {
6058		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6059			   mac);
6060		netif_addr_lock_bh(netdev);
6061		eth_hw_addr_set(netdev, old_mac);
6062		netif_addr_unlock_bh(netdev);
6063		return err;
6064	}
6065
6066	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6067		   netdev->dev_addr);
6068
6069	/* write new MAC address to the firmware */
6070	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6071	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6072	if (err) {
6073		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6074			   mac, err);
6075	}
6076	return 0;
6077}
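/* Illustrative usage (user space; the device name and address are
 * placeholders): this NDO is reached through the normal address-change path,
 * e.g.
 *
 *   ip link set dev eth0 address 02:11:22:33:44:55
 *
 * An address with the locally administered bit set (02:...) is what the
 * LAA-related firmware flags above are meant to preserve across wake.
 */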
6078
6079/**
6080 * ice_set_rx_mode - NDO callback to set the netdev filters
6081 * @netdev: network interface device structure
6082 */
6083static void ice_set_rx_mode(struct net_device *netdev)
6084{
6085	struct ice_netdev_priv *np = netdev_priv(netdev);
6086	struct ice_vsi *vsi = np->vsi;
6087
6088	if (!vsi || ice_is_switchdev_running(vsi->back))
6089		return;
6090
6091	/* Set the flags to synchronize filters;
6092	 * ndo_set_rx_mode may be triggered even without a change in netdev
6093	 * flags.
6094	 */
6095	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6096	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6097	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6098
6099	/* schedule our worker thread which will take care of
6100	 * applying the new filter changes
6101	 */
6102	ice_service_task_schedule(vsi->back);
6103}
6104
6105/**
6106 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6107 * @netdev: network interface device structure
6108 * @queue_index: Queue ID
6109 * @maxrate: maximum bandwidth in Mbps
6110 */
6111static int
6112ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6113{
6114	struct ice_netdev_priv *np = netdev_priv(netdev);
6115	struct ice_vsi *vsi = np->vsi;
6116	u16 q_handle;
6117	int status;
6118	u8 tc;
6119
6120	/* Validate maxrate requested is within permitted range */
6121	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6122		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6123			   maxrate, queue_index);
6124		return -EINVAL;
6125	}
6126
6127	q_handle = vsi->tx_rings[queue_index]->q_handle;
6128	tc = ice_dcb_get_tc(vsi, queue_index);
6129
6130	vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6131	if (!vsi) {
6132		netdev_err(netdev, "Invalid VSI for given queue %d\n",
6133			   queue_index);
6134		return -EINVAL;
6135	}
6136
6137	/* Set BW back to default, when user set maxrate to 0 */
6138	if (!maxrate)
6139		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6140					       q_handle, ICE_MAX_BW);
6141	else
6142		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6143					  q_handle, ICE_MAX_BW, maxrate * 1000);
6144	if (status)
6145		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6146			   status);
6147
6148	return status;
6149}
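/* Illustrative usage (user space; "eth0" is a placeholder name): the stack
 * calls this NDO from the per-queue tx_maxrate sysfs attribute, e.g.
 *
 *   echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * requests a 500 Mbps cap on queue 0. The Mbps value is converted to Kbps
 * (maxrate * 1000) before being programmed, and writing 0 restores the
 * default (unlimited) queue bandwidth.
 */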
6150
6151/**
6152 * ice_fdb_add - add an entry to the hardware database
6153 * @ndm: the input from the stack
6154 * @tb: pointer to array of nladdr (unused)
6155 * @dev: the net device pointer
6156 * @addr: the MAC address entry being added
6157 * @vid: VLAN ID
6158 * @flags: instructions from stack about fdb operation
6159 * @notified: whether notification was emitted
6160 * @extack: netlink extended ack
6161 */
6162static int
6163ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6164	    struct net_device *dev, const unsigned char *addr, u16 vid,
6165	    u16 flags, bool *notified,
6166	    struct netlink_ext_ack __always_unused *extack)
6167{
6168	int err;
6169
6170	if (vid) {
6171		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6172		return -EINVAL;
6173	}
6174	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6175		netdev_err(dev, "FDB only supports static addresses\n");
6176		return -EINVAL;
6177	}
6178
6179	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6180		err = dev_uc_add_excl(dev, addr);
6181	else if (is_multicast_ether_addr(addr))
6182		err = dev_mc_add_excl(dev, addr);
6183	else
6184		err = -EINVAL;
6185
6186	/* Only return duplicate errors if NLM_F_EXCL is set */
6187	if (err == -EEXIST && !(flags & NLM_F_EXCL))
6188		err = 0;
6189
6190	return err;
6191}
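/* Illustrative usage (user space; device name and address are placeholders):
 * static entries reach this handler via the ndo_fdb_add path, e.g.
 *
 *   bridge fdb add 33:33:00:00:00:01 dev eth0 self permanent
 *
 * Multicast addresses are handed to dev_mc_add_excl(), unicast and
 * link-local ones to dev_uc_add_excl(); VLAN-qualified entries are rejected
 * above.
 */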
6192
6193/**
6194 * ice_fdb_del - delete an entry from the hardware database
6195 * @ndm: the input from the stack
6196 * @tb: pointer to array of nladdr (unused)
6197 * @dev: the net device pointer
6198 * @addr: the MAC address entry being removed
6199 * @vid: VLAN ID
6200 * @notified: whether notification was emitted
6201 * @extack: netlink extended ack
6202 */
6203static int
6204ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6205	    struct net_device *dev, const unsigned char *addr,
6206	    __always_unused u16 vid, bool *notified,
6207	    struct netlink_ext_ack *extack)
6208{
6209	int err;
6210
6211	if (ndm->ndm_state & NUD_PERMANENT) {
6212		netdev_err(dev, "FDB only supports static addresses\n");
6213		return -EINVAL;
6214	}
6215
6216	if (is_unicast_ether_addr(addr))
6217		err = dev_uc_del(dev, addr);
6218	else if (is_multicast_ether_addr(addr))
6219		err = dev_mc_del(dev, addr);
6220	else
6221		err = -EINVAL;
6222
6223	return err;
6224}
6225
6226#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6227					 NETIF_F_HW_VLAN_CTAG_TX | \
6228					 NETIF_F_HW_VLAN_STAG_RX | \
6229					 NETIF_F_HW_VLAN_STAG_TX)
6230
6231#define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6232					 NETIF_F_HW_VLAN_STAG_RX)
6233
6234#define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
6235					 NETIF_F_HW_VLAN_STAG_FILTER)
6236
6237/**
6238 * ice_fix_features - fix the netdev features flags based on device limitations
6239 * @netdev: ptr to the netdev that flags are being fixed on
6240 * @features: features that need to be checked and possibly fixed
6241 *
6242 * Make sure any fixups are made to features in this callback. This enables the
6243 * driver to not have to check unsupported configurations throughout the driver
6244 * because that's the responsibility of this callback.
6245 *
6246 * Single VLAN Mode (SVM) Supported Features:
6247 *	NETIF_F_HW_VLAN_CTAG_FILTER
6248 *	NETIF_F_HW_VLAN_CTAG_RX
6249 *	NETIF_F_HW_VLAN_CTAG_TX
6250 *
6251 * Double VLAN Mode (DVM) Supported Features:
6252 *	NETIF_F_HW_VLAN_CTAG_FILTER
6253 *	NETIF_F_HW_VLAN_CTAG_RX
6254 *	NETIF_F_HW_VLAN_CTAG_TX
6255 *
6256 *	NETIF_F_HW_VLAN_STAG_FILTER
6257 *	NETIF_F_HW_VLAN_STAG_RX
6258 *	NETIF_F_HW_VLAN_STAG_TX
6259 *
6260 * Features that need fixing:
6261 *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6262 *	These are mutually exclusive as the VSI context cannot support multiple
6263 *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
6264 *	is not done, then default to clearing the requested STAG offload
6265 *	settings.
6266 *
6267 *	All supported filtering has to be enabled or disabled together. For
6268 *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6269 *	together. If this is not done, then default to VLAN filtering disabled.
6270 *	These are mutually exclusive as there is currently no way to
6271 *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6272 *	prune rules.
6273 */
6274static netdev_features_t
6275ice_fix_features(struct net_device *netdev, netdev_features_t features)
6276{
6277	struct ice_netdev_priv *np = netdev_priv(netdev);
6278	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6279	bool cur_ctag, cur_stag, req_ctag, req_stag;
6280
6281	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6282	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6283	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6284
6285	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6286	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6287	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6288
6289	if (req_vlan_fltr != cur_vlan_fltr) {
6290		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6291			if (req_ctag && req_stag) {
6292				features |= NETIF_VLAN_FILTERING_FEATURES;
6293			} else if (!req_ctag && !req_stag) {
6294				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6295			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
6296				   (!cur_stag && req_stag && !cur_ctag)) {
6297				features |= NETIF_VLAN_FILTERING_FEATURES;
6298				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6299			} else if ((cur_ctag && !req_ctag && cur_stag) ||
6300				   (cur_stag && !req_stag && cur_ctag)) {
6301				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6302				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6303			}
6304		} else {
6305			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6306				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6307
6308			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6309				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6310		}
6311	}
6312
6313	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6314	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6315		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6316		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6317			      NETIF_F_HW_VLAN_STAG_TX);
6318	}
6319
6320	if (!(netdev->features & NETIF_F_RXFCS) &&
6321	    (features & NETIF_F_RXFCS) &&
6322	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6323	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
6324		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6325		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6326	}
6327
6328	return features;
6329}
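/* Illustrative example (user space; "eth0" and the feature names are assumed
 * from ethtool's standard feature strings): in DVM a request such as
 *
 *   ethtool -K eth0 rx-vlan-filter on rx-vlan-stag-filter off
 *
 * is contradictory, so the fixup above coerces both filtering features to
 * the same state and logs a warning rather than failing the request.
 */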
6330
6331/**
6332 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6333 * @vsi: PF's VSI
6334 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6335 *
6336 * Store current stripped VLAN proto in ring packet context,
6337 * so it can be accessed more efficiently by packet processing code.
6338 */
6339static void
6340ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6341{
6342	u16 i;
6343
6344	ice_for_each_alloc_rxq(vsi, i)
6345		vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6346}
6347
6348/**
6349 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6350 * @vsi: PF's VSI
6351 * @features: features used to determine VLAN offload settings
6352 *
6353 * First, determine the vlan_ethertype based on the VLAN offload bits in
6354 * features. Then determine if stripping and insertion should be enabled or
6355 * disabled. Finally enable or disable VLAN stripping and insertion.
6356 */
6357static int
6358ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6359{
6360	bool enable_stripping = true, enable_insertion = true;
6361	struct ice_vsi_vlan_ops *vlan_ops;
6362	int strip_err = 0, insert_err = 0;
6363	u16 vlan_ethertype = 0;
6364
6365	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6366
6367	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6368		vlan_ethertype = ETH_P_8021AD;
6369	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6370		vlan_ethertype = ETH_P_8021Q;
6371
6372	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6373		enable_stripping = false;
6374	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6375		enable_insertion = false;
6376
6377	if (enable_stripping)
6378		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6379	else
6380		strip_err = vlan_ops->dis_stripping(vsi);
6381
6382	if (enable_insertion)
6383		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6384	else
6385		insert_err = vlan_ops->dis_insertion(vsi);
6386
6387	if (strip_err || insert_err)
6388		return -EIO;
6389
6390	ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6391				    htons(vlan_ethertype) : 0);
6392
6393	return 0;
6394}
6395
6396/**
6397 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6398 * @vsi: PF's VSI
6399 * @features: features used to determine VLAN filtering settings
6400 *
6401 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6402 * features.
6403 */
6404static int
6405ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6406{
6407	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6408	int err = 0;
6409
6410	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6411	 * if either bit is set. In switchdev mode Rx filtering should never be
6412	 * enabled.
6413	 */
6414	if ((features &
6415	     (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
6416	     !ice_is_eswitch_mode_switchdev(vsi->back))
6417		err = vlan_ops->ena_rx_filtering(vsi);
6418	else
6419		err = vlan_ops->dis_rx_filtering(vsi);
6420
6421	return err;
6422}
6423
6424/**
6425 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6426 * @netdev: ptr to the netdev being adjusted
6427 * @features: the feature set that the stack is suggesting
6428 *
6429 * Only update VLAN settings if the requested_vlan_features are different than
6430 * the current_vlan_features.
6431 */
6432static int
6433ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6434{
6435	netdev_features_t current_vlan_features, requested_vlan_features;
6436	struct ice_netdev_priv *np = netdev_priv(netdev);
6437	struct ice_vsi *vsi = np->vsi;
6438	int err;
6439
6440	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6441	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6442	if (current_vlan_features ^ requested_vlan_features) {
6443		if ((features & NETIF_F_RXFCS) &&
6444		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6445			dev_err(ice_pf_to_dev(vsi->back),
6446				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6447			return -EIO;
6448		}
6449
6450		err = ice_set_vlan_offload_features(vsi, features);
6451		if (err)
6452			return err;
6453	}
6454
6455	current_vlan_features = netdev->features &
6456		NETIF_VLAN_FILTERING_FEATURES;
6457	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6458	if (current_vlan_features ^ requested_vlan_features) {
6459		err = ice_set_vlan_filtering_features(vsi, features);
6460		if (err)
6461			return err;
6462	}
6463
6464	return 0;
6465}
6466
6467/**
6468 * ice_set_loopback - turn on/off loopback mode on underlying PF
6469 * @vsi: ptr to VSI
6470 * @ena: flag to indicate the on/off setting
6471 */
6472static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6473{
6474	bool if_running = netif_running(vsi->netdev);
6475	int ret;
6476
6477	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6478		ret = ice_down(vsi);
6479		if (ret) {
6480			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6481			return ret;
6482		}
6483	}
6484	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6485	if (ret)
6486		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6487	if (if_running)
6488		ret = ice_up(vsi);
6489
6490	return ret;
6491}
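/* Illustrative usage (user space; "eth0" is a placeholder name): this path
 * is taken when the NETIF_F_LOOPBACK feature is toggled, e.g.
 *
 *   ethtool -K eth0 loopback on
 *
 * The interface is briefly brought down so the MAC loopback admin queue
 * command can be applied, then brought back up if it was running.
 */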
6492
6493/**
6494 * ice_set_features - set the netdev feature flags
6495 * @netdev: ptr to the netdev being adjusted
6496 * @features: the feature set that the stack is suggesting
6497 */
6498static int
6499ice_set_features(struct net_device *netdev, netdev_features_t features)
6500{
6501	netdev_features_t changed = netdev->features ^ features;
6502	struct ice_netdev_priv *np = netdev_priv(netdev);
6503	struct ice_vsi *vsi = np->vsi;
6504	struct ice_pf *pf = vsi->back;
6505	int ret = 0;
6506
6507	/* Don't set any netdev advanced features with device in Safe Mode */
6508	if (ice_is_safe_mode(pf)) {
6509		dev_err(ice_pf_to_dev(pf),
6510			"Device is in Safe Mode - not enabling advanced netdev features\n");
6511		return ret;
6512	}
6513
6514	/* Do not change setting during reset */
6515	if (ice_is_reset_in_progress(pf->state)) {
6516		dev_err(ice_pf_to_dev(pf),
6517			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6518		return -EBUSY;
6519	}
6520
6521	/* Multiple features can be changed in one call so keep features in
6522	 * separate if/else statements to guarantee each feature is checked
6523	 */
6524	if (changed & NETIF_F_RXHASH)
6525		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6526
6527	ret = ice_set_vlan_features(netdev, features);
6528	if (ret)
6529		return ret;
6530
6531	/* Turn on reception of the FCS (aka CRC); after setting this
6532	 * flag the packet data will have the 4-byte CRC appended.
6533	 */
6534	if (changed & NETIF_F_RXFCS) {
6535		if ((features & NETIF_F_RXFCS) &&
6536		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6537			dev_err(ice_pf_to_dev(vsi->back),
6538				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6539			return -EIO;
6540		}
6541
6542		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6543		ret = ice_down_up(vsi);
6544		if (ret)
6545			return ret;
6546	}
6547
6548	if (changed & NETIF_F_NTUPLE) {
6549		bool ena = !!(features & NETIF_F_NTUPLE);
6550
6551		ice_vsi_manage_fdir(vsi, ena);
6552		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6553	}
6554
6555	/* don't turn off hw_tc_offload when ADQ is already enabled */
6556	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6557		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6558		return -EACCES;
6559	}
6560
6561	if (changed & NETIF_F_HW_TC) {
6562		bool ena = !!(features & NETIF_F_HW_TC);
6563
6564		assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
6565	}
6566
6567	if (changed & NETIF_F_LOOPBACK)
6568		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6569
6570	return ret;
6571}
6572
6573/**
6574 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6575 * @vsi: VSI to setup VLAN properties for
6576 */
6577static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6578{
6579	int err;
6580
6581	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6582	if (err)
6583		return err;
6584
6585	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6586	if (err)
6587		return err;
6588
6589	return ice_vsi_add_vlan_zero(vsi);
6590}
6591
6592/**
6593 * ice_vsi_cfg_lan - Set up the VSI's LAN-related config
6594 * @vsi: the VSI being configured
6595 *
6596 * Return 0 on success and negative value on error
6597 */
6598int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6599{
6600	int err;
6601
6602	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6603		ice_set_rx_mode(vsi->netdev);
6604
6605		err = ice_vsi_vlan_setup(vsi);
6606		if (err)
6607			return err;
6608	}
6609	ice_vsi_cfg_dcb_rings(vsi);
6610
6611	err = ice_vsi_cfg_lan_txqs(vsi);
6612	if (!err && ice_is_xdp_ena_vsi(vsi))
6613		err = ice_vsi_cfg_xdp_txqs(vsi);
6614	if (!err)
6615		err = ice_vsi_cfg_rxqs(vsi);
6616
6617	return err;
6618}
6619
6620/* THEORY OF MODERATION:
6621 * The ice driver hardware works differently from the hardware that DIMLIB was
6622 * originally made for. ice hardware doesn't have packet count limits that
6623 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6624 * which is hard-coded to a limit of 250,000 ints/second.
6625 * If not using dynamic moderation, the INTRL value can be modified
6626 * by ethtool rx-usecs-high.
6627 */
6628struct ice_dim {
6629	/* the throttle rate for interrupts, basically worst case delay before
6630	 * an initial interrupt fires, value is stored in microseconds.
6631	 */
6632	u16 itr;
6633};
6634
6635/* Make a different profile for Rx that doesn't allow quite so aggressive
6636 * moderation at the high end (it maxes out at 126 us, or about 8k interrupts a
6637 * second).
6638 */
6639static const struct ice_dim rx_profile[] = {
6640	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6641	{8},    /* 125,000 ints/s */
6642	{16},   /*  62,500 ints/s */
6643	{62},   /*  16,129 ints/s */
6644	{126}   /*   7,936 ints/s */
6645};
6646
6647/* The transmit profile, which has the same sorts of values
6648 * as the previous struct
6649 */
6650static const struct ice_dim tx_profile[] = {
6651	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6652	{8},    /* 125,000 ints/s */
6653	{40},   /*  25,000 ints/s */
6654	{128},  /*   7,812 ints/s */
6655	{256}   /*   3,906 ints/s */
6656};
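/* Worked example (illustrative): since ITR values are stored in
 * microseconds, a profile entry of N us allows at most 1,000,000 / N
 * interrupts per second, e.g. 8 us -> 125,000 ints/s and 126 us -> ~7,936
 * ints/s. The 2 us entries would permit 500,000 ints/s, but the hard-coded
 * INTRL rate limit clamps delivery to 250,000 ints/s.
 */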
6657
6658static void ice_tx_dim_work(struct work_struct *work)
6659{
6660	struct ice_ring_container *rc;
6661	struct dim *dim;
6662	u16 itr;
6663
6664	dim = container_of(work, struct dim, work);
6665	rc = dim->priv;
6666
6667	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6668
6669	/* look up the values in our local table */
6670	itr = tx_profile[dim->profile_ix].itr;
6671
6672	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6673	ice_write_itr(rc, itr);
6674
6675	dim->state = DIM_START_MEASURE;
6676}
6677
6678static void ice_rx_dim_work(struct work_struct *work)
6679{
6680	struct ice_ring_container *rc;
6681	struct dim *dim;
6682	u16 itr;
6683
6684	dim = container_of(work, struct dim, work);
6685	rc = dim->priv;
6686
6687	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6688
6689	/* look up the values in our local table */
6690	itr = rx_profile[dim->profile_ix].itr;
6691
6692	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6693	ice_write_itr(rc, itr);
6694
6695	dim->state = DIM_START_MEASURE;
6696}
6697
6698#define ICE_DIM_DEFAULT_PROFILE_IX 1
6699
6700/**
6701 * ice_init_moderation - set up interrupt moderation
6702 * @q_vector: the vector containing rings to be configured
6703 *
6704 * Set up interrupt moderation registers, with the intent to do the right thing
6705 * when called from reset or from probe, whether or not dynamic moderation is
6706 * enabled. Take special care to write all the registers whether dynamic
6707 * moderation is in use or not, in order to make sure hardware is in a known
6708 * state.
6709 */
6710static void ice_init_moderation(struct ice_q_vector *q_vector)
6711{
6712	struct ice_ring_container *rc;
6713	bool tx_dynamic, rx_dynamic;
6714
6715	rc = &q_vector->tx;
6716	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6717	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6718	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6719	rc->dim.priv = rc;
6720	tx_dynamic = ITR_IS_DYNAMIC(rc);
6721
6722	/* set the initial TX ITR to match the above */
6723	ice_write_itr(rc, tx_dynamic ?
6724		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6725
6726	rc = &q_vector->rx;
6727	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6728	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6729	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6730	rc->dim.priv = rc;
6731	rx_dynamic = ITR_IS_DYNAMIC(rc);
6732
6733	/* set the initial RX ITR to match the above */
6734	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6735				       rc->itr_setting);
6736
6737	ice_set_q_vector_intrl(q_vector);
6738}
6739
6740/**
6741 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6742 * @vsi: the VSI being configured
6743 */
6744static void ice_napi_enable_all(struct ice_vsi *vsi)
6745{
6746	int q_idx;
6747
6748	if (!vsi->netdev)
6749		return;
6750
6751	ice_for_each_q_vector(vsi, q_idx) {
6752		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6753
6754		ice_init_moderation(q_vector);
6755
6756		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6757			napi_enable(&q_vector->napi);
6758	}
6759}
6760
6761/**
6762 * ice_up_complete - Finish the last steps of bringing up a connection
6763 * @vsi: The VSI being configured
6764 *
6765 * Return 0 on success and negative value on error
6766 */
6767static int ice_up_complete(struct ice_vsi *vsi)
6768{
6769	struct ice_pf *pf = vsi->back;
6770	int err;
6771
6772	ice_vsi_cfg_msix(vsi);
6773
6774	/* Enable only Rx rings, Tx rings were enabled by the FW when the
6775	 * Tx queue group list was configured and the context bits were
6776	 * programmed using ice_vsi_cfg_txqs
6777	 */
6778	err = ice_vsi_start_all_rx_rings(vsi);
6779	if (err)
6780		return err;
6781
6782	clear_bit(ICE_VSI_DOWN, vsi->state);
6783	ice_napi_enable_all(vsi);
6784	ice_vsi_ena_irq(vsi);
6785
6786	if (vsi->port_info &&
6787	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6788	    ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
6789			      vsi->type == ICE_VSI_SF)))) {
6790		ice_print_link_msg(vsi, true);
6791		netif_tx_start_all_queues(vsi->netdev);
6792		netif_carrier_on(vsi->netdev);
6793		ice_ptp_link_change(pf, true);
6794	}
6795
6796	/* Perform an initial read of the statistics registers now to
6797	 * set the baseline so counters are ready when interface is up
6798	 */
6799	ice_update_eth_stats(vsi);
6800
6801	if (vsi->type == ICE_VSI_PF)
6802		ice_service_task_schedule(pf);
6803
6804	return 0;
6805}
6806
6807/**
6808 * ice_up - Bring the connection back up after being down
6809 * @vsi: VSI being configured
6810 */
6811int ice_up(struct ice_vsi *vsi)
6812{
6813	int err;
6814
6815	err = ice_vsi_cfg_lan(vsi);
6816	if (!err)
6817		err = ice_up_complete(vsi);
6818
6819	return err;
6820}
6821
6822/**
6823 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6824 * @syncp: pointer to u64_stats_sync
6825 * @stats: stats that pkts and bytes count will be taken from
6826 * @pkts: packets stats counter
6827 * @bytes: bytes stats counter
6828 *
6829 * This function fetches stats from the ring considering the atomic operations
6830 * that needs to be performed to read u64 values in 32 bit machine.
6831 */
6832void
6833ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6834			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6835{
6836	unsigned int start;
6837
6838	do {
6839		start = u64_stats_fetch_begin(syncp);
6840		*pkts = stats.pkts;
6841		*bytes = stats.bytes;
6842	} while (u64_stats_fetch_retry(syncp, start));
6843}
6844
6845/**
6846 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6847 * @vsi: the VSI to be updated
6848 * @vsi_stats: the stats struct to be updated
6849 * @rings: rings to work on
6850 * @count: number of rings
6851 */
6852static void
6853ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6854			     struct rtnl_link_stats64 *vsi_stats,
6855			     struct ice_tx_ring **rings, u16 count)
6856{
6857	u16 i;
6858
6859	for (i = 0; i < count; i++) {
6860		struct ice_tx_ring *ring;
6861		u64 pkts = 0, bytes = 0;
6862
6863		ring = READ_ONCE(rings[i]);
6864		if (!ring || !ring->ring_stats)
6865			continue;
6866		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6867					     ring->ring_stats->stats, &pkts,
6868					     &bytes);
6869		vsi_stats->tx_packets += pkts;
6870		vsi_stats->tx_bytes += bytes;
6871		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6872		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6873		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6874	}
6875}
6876
6877/**
6878 * ice_update_vsi_ring_stats - Update VSI stats counters
6879 * @vsi: the VSI to be updated
6880 */
6881static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6882{
6883	struct rtnl_link_stats64 *net_stats, *stats_prev;
6884	struct rtnl_link_stats64 *vsi_stats;
6885	struct ice_pf *pf = vsi->back;
6886	u64 pkts, bytes;
6887	int i;
6888
6889	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6890	if (!vsi_stats)
6891		return;
6892
6893	/* reset non-netdev (extended) stats */
6894	vsi->tx_restart = 0;
6895	vsi->tx_busy = 0;
6896	vsi->tx_linearize = 0;
6897	vsi->rx_buf_failed = 0;
6898	vsi->rx_page_failed = 0;
6899
6900	rcu_read_lock();
6901
6902	/* update Tx rings counters */
6903	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6904				     vsi->num_txq);
6905
6906	/* update Rx rings counters */
6907	ice_for_each_rxq(vsi, i) {
6908		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6909		struct ice_ring_stats *ring_stats;
6910
6911		ring_stats = ring->ring_stats;
6912		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6913					     ring_stats->stats, &pkts,
6914					     &bytes);
6915		vsi_stats->rx_packets += pkts;
6916		vsi_stats->rx_bytes += bytes;
6917		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6918		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6919	}
6920
6921	/* update XDP Tx rings counters */
6922	if (ice_is_xdp_ena_vsi(vsi))
6923		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6924					     vsi->num_xdp_txq);
6925
6926	rcu_read_unlock();
6927
6928	net_stats = &vsi->net_stats;
6929	stats_prev = &vsi->net_stats_prev;
6930
6931	/* Update netdev counters, but keep in mind that values could start at a
6932	 * random value after PF reset. And as we increase the reported stat by the
6933	 * diff of Cur - Prev, we need to be sure that Prev is valid. If it's not,
6934	 * let's skip this round.
6935	 */
6936	if (likely(pf->stat_prev_loaded)) {
6937		net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6938		net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6939		net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6940		net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6941	}
6942
6943	stats_prev->tx_packets = vsi_stats->tx_packets;
6944	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6945	stats_prev->rx_packets = vsi_stats->rx_packets;
6946	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6947
6948	kfree(vsi_stats);
6949}
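/* Worked example (illustrative numbers): with Prev = 5000 Tx packets and a
 * current ring total Cur = 5100, the netdev counter above grows by 100.
 * After a PF reset the ring totals restart near zero, so Cur - Prev would be
 * wildly wrong; stat_prev_loaded is false for that first pass, the diff is
 * skipped, and Prev is simply re-seeded from Cur.
 */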
6950
6951/**
6952 * ice_update_vsi_stats - Update VSI stats counters
6953 * @vsi: the VSI to be updated
6954 */
6955void ice_update_vsi_stats(struct ice_vsi *vsi)
6956{
6957	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6958	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6959	struct ice_pf *pf = vsi->back;
6960
6961	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6962	    test_bit(ICE_CFG_BUSY, pf->state))
6963		return;
6964
6965	/* get stats as recorded by Tx/Rx rings */
6966	ice_update_vsi_ring_stats(vsi);
6967
6968	/* get VSI stats as recorded by the hardware */
6969	ice_update_eth_stats(vsi);
6970
6971	cur_ns->tx_errors = cur_es->tx_errors;
6972	cur_ns->rx_dropped = cur_es->rx_discards;
6973	cur_ns->tx_dropped = cur_es->tx_discards;
6974	cur_ns->multicast = cur_es->rx_multicast;
6975
6976	/* update some more netdev stats if this is main VSI */
6977	if (vsi->type == ICE_VSI_PF) {
6978		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6979		cur_ns->rx_errors = pf->stats.crc_errors +
6980				    pf->stats.illegal_bytes +
6981				    pf->stats.rx_undersize +
6982				    pf->hw_csum_rx_error +
6983				    pf->stats.rx_jabber +
6984				    pf->stats.rx_fragments +
6985				    pf->stats.rx_oversize;
6986		/* record drops from the port level */
6987		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6988	}
6989}
6990
6991/**
6992 * ice_update_pf_stats - Update PF port stats counters
6993 * @pf: PF whose stats need to be updated
6994 */
6995void ice_update_pf_stats(struct ice_pf *pf)
6996{
6997	struct ice_hw_port_stats *prev_ps, *cur_ps;
6998	struct ice_hw *hw = &pf->hw;
6999	u16 fd_ctr_base;
7000	u8 port;
7001
7002	port = hw->port_info->lport;
7003	prev_ps = &pf->stats_prev;
7004	cur_ps = &pf->stats;
7005
7006	if (ice_is_reset_in_progress(pf->state))
7007		pf->stat_prev_loaded = false;
7008
7009	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
7010			  &prev_ps->eth.rx_bytes,
7011			  &cur_ps->eth.rx_bytes);
7012
7013	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
7014			  &prev_ps->eth.rx_unicast,
7015			  &cur_ps->eth.rx_unicast);
7016
7017	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
7018			  &prev_ps->eth.rx_multicast,
7019			  &cur_ps->eth.rx_multicast);
7020
7021	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
7022			  &prev_ps->eth.rx_broadcast,
7023			  &cur_ps->eth.rx_broadcast);
7024
7025	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
7026			  &prev_ps->eth.rx_discards,
7027			  &cur_ps->eth.rx_discards);
7028
7029	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
7030			  &prev_ps->eth.tx_bytes,
7031			  &cur_ps->eth.tx_bytes);
7032
7033	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
7034			  &prev_ps->eth.tx_unicast,
7035			  &cur_ps->eth.tx_unicast);
7036
7037	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
7038			  &prev_ps->eth.tx_multicast,
7039			  &cur_ps->eth.tx_multicast);
7040
7041	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
7042			  &prev_ps->eth.tx_broadcast,
7043			  &cur_ps->eth.tx_broadcast);
7044
7045	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
7046			  &prev_ps->tx_dropped_link_down,
7047			  &cur_ps->tx_dropped_link_down);
7048
7049	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
7050			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
7051
7052	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
7053			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
7054
7055	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
7056			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
7057
7058	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
7059			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7060
7061	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7062			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7063
7064	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7065			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7066
7067	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7068			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7069
7070	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7071			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7072
7073	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7074			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7075
7076	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7077			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7078
7079	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7080			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7081
7082	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7083			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7084
7085	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7086			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7087
7088	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7089			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7090
7091	fd_ctr_base = hw->fd_ctr_base;
7092
7093	ice_stat_update40(hw,
7094			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7095			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7096			  &cur_ps->fd_sb_match);
7097	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7098			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7099
7100	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7101			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7102
7103	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7104			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7105
7106	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7107			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7108
7109	ice_update_dcb_stats(pf);
7110
7111	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7112			  &prev_ps->crc_errors, &cur_ps->crc_errors);
7113
7114	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7115			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7116
7117	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7118			  &prev_ps->mac_local_faults,
7119			  &cur_ps->mac_local_faults);
7120
7121	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7122			  &prev_ps->mac_remote_faults,
7123			  &cur_ps->mac_remote_faults);
7124
7125	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7126			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7127
7128	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7129			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7130
7131	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7132			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7133
7134	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7135			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7136
7137	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7138
7139	pf->stat_prev_loaded = true;
7140}
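/* Editor's illustrative sketch, not part of upstream ice_main.c: the
 * ice_stat_update40/32() helpers used above accumulate the delta between
 * the latest register snapshot and the previous one, seeding the baseline
 * on the first pass (while pf->stat_prev_loaded is still false) and
 * compensating for 40-bit/32-bit counter wrap, roughly like:
 *
 *	new = rd64(hw, reg) & (BIT_ULL(40) - 1);
 *	if (!prev_loaded)
 *		*prev = new;
 *	if (new >= *prev)
 *		*cur += new - *prev;
 *	else
 *		*cur += (new + BIT_ULL(40)) - *prev;
 *	*prev = new;
 */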
7141
7142/**
7143 * ice_get_stats64 - get statistics for network device structure
7144 * @netdev: network interface device structure
7145 * @stats: main device statistics structure
7146 */
7147void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7148{
7149	struct ice_netdev_priv *np = netdev_priv(netdev);
7150	struct rtnl_link_stats64 *vsi_stats;
7151	struct ice_vsi *vsi = np->vsi;
7152
7153	vsi_stats = &vsi->net_stats;
7154
7155	if (!vsi->num_txq || !vsi->num_rxq)
7156		return;
7157
7158	/* netdev packet/byte stats come from per-ring counters. These are
7159	 * obtained by summing up the ring counters (done by
7160	 * ice_update_vsi_ring_stats()). Only call the update routine and read
7161	 * the registers if the VSI is not down.
7162	 */
7163	if (!test_bit(ICE_VSI_DOWN, vsi->state))
7164		ice_update_vsi_ring_stats(vsi);
7165	stats->tx_packets = vsi_stats->tx_packets;
7166	stats->tx_bytes = vsi_stats->tx_bytes;
7167	stats->rx_packets = vsi_stats->rx_packets;
7168	stats->rx_bytes = vsi_stats->rx_bytes;
7169
7170	/* The rest of the stats could be read from the hardware, but instead
7171	 * we just return the values that the watchdog task has already
7172	 * obtained from it.
7173	 */
7174	stats->multicast = vsi_stats->multicast;
7175	stats->tx_errors = vsi_stats->tx_errors;
7176	stats->tx_dropped = vsi_stats->tx_dropped;
7177	stats->rx_errors = vsi_stats->rx_errors;
7178	stats->rx_dropped = vsi_stats->rx_dropped;
7179	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7180	stats->rx_length_errors = vsi_stats->rx_length_errors;
7181}
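/* Editor's illustrative sketch, not part of upstream ice_main.c: the
 * networking core reaches ice_get_stats64() through .ndo_get_stats64,
 * typically via dev_get_stats(). A hypothetical in-kernel caller:
 */
static void __maybe_unused ice_example_read_stats(struct net_device *netdev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(netdev, &stats);	/* ends up in ice_get_stats64() */
	netdev_dbg(netdev, "rx %llu pkts, tx %llu pkts\n",
		   stats.rx_packets, stats.tx_packets);
}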
7182
7183/**
7184 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7185 * @vsi: VSI having NAPI disabled
7186 */
7187static void ice_napi_disable_all(struct ice_vsi *vsi)
7188{
7189	int q_idx;
7190
7191	if (!vsi->netdev)
7192		return;
7193
7194	ice_for_each_q_vector(vsi, q_idx) {
7195		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7196
7197		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7198			napi_disable(&q_vector->napi);
7199
7200		cancel_work_sync(&q_vector->tx.dim.work);
7201		cancel_work_sync(&q_vector->rx.dim.work);
7202	}
7203}
7204
7205/**
7206 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7207 * @vsi: the VSI being un-configured
7208 */
7209static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7210{
7211	struct ice_pf *pf = vsi->back;
7212	struct ice_hw *hw = &pf->hw;
7213	u32 val;
7214	int i;
7215
7216	/* disable interrupt causation from each Rx queue; Tx queues are
7217	 * handled in ice_vsi_stop_tx_ring()
7218	 */
7219	if (vsi->rx_rings) {
7220		ice_for_each_rxq(vsi, i) {
7221			if (vsi->rx_rings[i]) {
7222				u16 reg;
7223
7224				reg = vsi->rx_rings[i]->reg_idx;
7225				val = rd32(hw, QINT_RQCTL(reg));
7226				val &= ~QINT_RQCTL_CAUSE_ENA_M;
7227				wr32(hw, QINT_RQCTL(reg), val);
7228			}
7229		}
7230	}
7231
7232	/* disable each interrupt */
7233	ice_for_each_q_vector(vsi, i) {
7234		if (!vsi->q_vectors[i])
7235			continue;
7236		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7237	}
7238
7239	ice_flush(hw);
7240
7241	/* don't call synchronize_irq() for VF's from the host */
7242	if (vsi->type == ICE_VSI_VF)
7243		return;
7244
7245	ice_for_each_q_vector(vsi, i)
7246		synchronize_irq(vsi->q_vectors[i]->irq.virq);
7247}
7248
7249/**
7250 * ice_down - Shutdown the connection
7251 * @vsi: The VSI being stopped
7252 *
7253 * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
7254 */
7255int ice_down(struct ice_vsi *vsi)
7256{
7257	int i, tx_err, rx_err, vlan_err = 0;
7258
7259	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7260
7261	if (vsi->netdev) {
7262		vlan_err = ice_vsi_del_vlan_zero(vsi);
7263		ice_ptp_link_change(vsi->back, false);
7264		netif_carrier_off(vsi->netdev);
7265		netif_tx_disable(vsi->netdev);
7266	}
7267
7268	ice_vsi_dis_irq(vsi);
7269
7270	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7271	if (tx_err)
7272		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7273			   vsi->vsi_num, tx_err);
7274	if (!tx_err && vsi->xdp_rings) {
7275		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7276		if (tx_err)
7277			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7278				   vsi->vsi_num, tx_err);
7279	}
7280
7281	rx_err = ice_vsi_stop_all_rx_rings(vsi);
7282	if (rx_err)
7283		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7284			   vsi->vsi_num, rx_err);
7285
7286	ice_napi_disable_all(vsi);
7287
7288	ice_for_each_txq(vsi, i)
7289		ice_clean_tx_ring(vsi->tx_rings[i]);
7290
7291	if (vsi->xdp_rings)
7292		ice_for_each_xdp_txq(vsi, i)
7293			ice_clean_tx_ring(vsi->xdp_rings[i]);
7294
7295	ice_for_each_rxq(vsi, i)
7296		ice_clean_rx_ring(vsi->rx_rings[i]);
7297
7298	if (tx_err || rx_err || vlan_err) {
7299		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7300			   vsi->vsi_num, vsi->vsw->sw_id);
7301		return -EIO;
7302	}
7303
7304	return 0;
7305}
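/* Editor's illustrative sketch, not part of upstream ice_main.c: the
 * expected calling pattern for ice_down(). The caller must set the
 * ICE_VSI_DOWN bit first (exactly what ice_down_up() below does), or
 * the WARN_ON() at the top of ice_down() fires.
 */
static int __maybe_unused ice_example_take_vsi_down(struct ice_vsi *vsi)
{
	set_bit(ICE_VSI_DOWN, vsi->state);
	return ice_down(vsi);
}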
7306
7307/**
7308 * ice_down_up - shutdown the VSI connection and bring it up
7309 * @vsi: the VSI to be reconnected
7310 */
7311int ice_down_up(struct ice_vsi *vsi)
7312{
7313	int ret;
7314
7315	/* if DOWN already set, nothing to do */
7316	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7317		return 0;
7318
7319	ret = ice_down(vsi);
7320	if (ret)
7321		return ret;
7322
7323	ret = ice_up(vsi);
7324	if (ret) {
7325		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7326		return ret;
7327	}
7328
7329	return 0;
7330}
7331
7332/**
7333 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7334 * @vsi: VSI having resources allocated
7335 *
7336 * Return 0 on success, negative on failure
7337 */
7338int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7339{
7340	int i, err = 0;
7341
7342	if (!vsi->num_txq) {
7343		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7344			vsi->vsi_num);
7345		return -EINVAL;
7346	}
7347
7348	ice_for_each_txq(vsi, i) {
7349		struct ice_tx_ring *ring = vsi->tx_rings[i];
7350
7351		if (!ring)
7352			return -EINVAL;
7353
7354		if (vsi->netdev)
7355			ring->netdev = vsi->netdev;
7356		err = ice_setup_tx_ring(ring);
7357		if (err)
7358			break;
7359	}
7360
7361	return err;
7362}
7363
7364/**
7365 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7366 * @vsi: VSI having resources allocated
7367 *
7368 * Return 0 on success, negative on failure
7369 */
7370int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7371{
7372	int i, err = 0;
7373
7374	if (!vsi->num_rxq) {
7375		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7376			vsi->vsi_num);
7377		return -EINVAL;
7378	}
7379
7380	ice_for_each_rxq(vsi, i) {
7381		struct ice_rx_ring *ring = vsi->rx_rings[i];
7382
7383		if (!ring)
7384			return -EINVAL;
7385
7386		if (vsi->netdev)
7387			ring->netdev = vsi->netdev;
7388		err = ice_setup_rx_ring(ring);
7389		if (err)
7390			break;
7391	}
7392
7393	return err;
7394}
7395
7396/**
7397 * ice_vsi_open_ctrl - open control VSI for use
7398 * @vsi: the VSI to open
7399 *
7400 * Initialization of the Control VSI
7401 *
7402 * Returns 0 on success, negative value on error
7403 */
7404int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7405{
7406	char int_name[ICE_INT_NAME_STR_LEN];
7407	struct ice_pf *pf = vsi->back;
7408	struct device *dev;
7409	int err;
7410
7411	dev = ice_pf_to_dev(pf);
7412	/* allocate descriptors */
7413	err = ice_vsi_setup_tx_rings(vsi);
7414	if (err)
7415		goto err_setup_tx;
7416
7417	err = ice_vsi_setup_rx_rings(vsi);
7418	if (err)
7419		goto err_setup_rx;
7420
7421	err = ice_vsi_cfg_lan(vsi);
7422	if (err)
7423		goto err_setup_rx;
7424
7425	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7426		 dev_driver_string(dev), dev_name(dev));
7427	err = ice_vsi_req_irq_msix(vsi, int_name);
7428	if (err)
7429		goto err_setup_rx;
7430
7431	ice_vsi_cfg_msix(vsi);
7432
7433	err = ice_vsi_start_all_rx_rings(vsi);
7434	if (err)
7435		goto err_up_complete;
7436
7437	clear_bit(ICE_VSI_DOWN, vsi->state);
7438	ice_vsi_ena_irq(vsi);
7439
7440	return 0;
7441
7442err_up_complete:
7443	ice_down(vsi);
7444err_setup_rx:
7445	ice_vsi_free_rx_rings(vsi);
7446err_setup_tx:
7447	ice_vsi_free_tx_rings(vsi);
7448
7449	return err;
7450}
7451
7452/**
7453 * ice_vsi_open - Called when a network interface is made active
7454 * @vsi: the VSI to open
7455 *
7456 * Initialization of the VSI
7457 *
7458 * Returns 0 on success, negative value on error
7459 */
7460int ice_vsi_open(struct ice_vsi *vsi)
7461{
7462	char int_name[ICE_INT_NAME_STR_LEN];
7463	struct ice_pf *pf = vsi->back;
7464	int err;
7465
7466	/* allocate descriptors */
7467	err = ice_vsi_setup_tx_rings(vsi);
7468	if (err)
7469		goto err_setup_tx;
7470
7471	err = ice_vsi_setup_rx_rings(vsi);
7472	if (err)
7473		goto err_setup_rx;
7474
7475	err = ice_vsi_cfg_lan(vsi);
7476	if (err)
7477		goto err_setup_rx;
7478
7479	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7480		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7481	err = ice_vsi_req_irq_msix(vsi, int_name);
7482	if (err)
7483		goto err_setup_rx;
7484
7485	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7486
7487	if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
7488		/* Notify the stack of the actual queue counts. */
7489		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7490		if (err)
7491			goto err_set_qs;
7492
7493		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7494		if (err)
7495			goto err_set_qs;
7496
7497		ice_vsi_set_napi_queues(vsi);
7498	}
7499
7500	err = ice_up_complete(vsi);
7501	if (err)
7502		goto err_up_complete;
7503
7504	return 0;
7505
7506err_up_complete:
7507	ice_down(vsi);
7508err_set_qs:
7509	ice_vsi_free_irq(vsi);
7510err_setup_rx:
7511	ice_vsi_free_rx_rings(vsi);
7512err_setup_tx:
7513	ice_vsi_free_tx_rings(vsi);
7514
7515	return err;
7516}
7517
7518/**
7519 * ice_vsi_release_all - Delete all VSIs
7520 * @pf: PF from which all VSIs are being removed
7521 */
7522static void ice_vsi_release_all(struct ice_pf *pf)
7523{
7524	int err, i;
7525
7526	if (!pf->vsi)
7527		return;
7528
7529	ice_for_each_vsi(pf, i) {
7530		if (!pf->vsi[i])
7531			continue;
7532
7533		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7534			continue;
7535
7536		err = ice_vsi_release(pf->vsi[i]);
7537		if (err)
7538			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7539				i, err, pf->vsi[i]->vsi_num);
7540	}
7541}
7542
7543/**
7544 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7545 * @pf: pointer to the PF instance
7546 * @type: VSI type to rebuild
7547 *
7548 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7549 */
7550static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7551{
7552	struct device *dev = ice_pf_to_dev(pf);
7553	int i, err;
7554
7555	ice_for_each_vsi(pf, i) {
7556		struct ice_vsi *vsi = pf->vsi[i];
7557
7558		if (!vsi || vsi->type != type)
7559			continue;
7560
7561		/* rebuild the VSI */
7562		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7563		if (err) {
7564			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7565				err, vsi->idx, ice_vsi_type_str(type));
7566			return err;
7567		}
7568
7569		/* replay filters for the VSI */
7570		err = ice_replay_vsi(&pf->hw, vsi->idx);
7571		if (err) {
7572			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7573				err, vsi->idx, ice_vsi_type_str(type));
7574			return err;
7575		}
7576
7577		/* Re-map HW VSI number, using VSI handle that has been
7578		 * previously validated in ice_replay_vsi() call above
7579		 */
7580		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7581
7582		/* enable the VSI */
7583		err = ice_ena_vsi(vsi, false);
7584		if (err) {
7585			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7586				err, vsi->idx, ice_vsi_type_str(type));
7587			return err;
7588		}
7589
7590		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7591			 ice_vsi_type_str(type));
7592	}
7593
7594	return 0;
7595}
7596
7597/**
7598 * ice_update_pf_netdev_link - Update PF netdev link status
7599 * @pf: pointer to the PF instance
7600 */
7601static void ice_update_pf_netdev_link(struct ice_pf *pf)
7602{
7603	bool link_up;
7604	int i;
7605
7606	ice_for_each_vsi(pf, i) {
7607		struct ice_vsi *vsi = pf->vsi[i];
7608
7609		if (!vsi || vsi->type != ICE_VSI_PF)
7610			return;
7611
7612		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7613		if (link_up) {
7614			netif_carrier_on(pf->vsi[i]->netdev);
7615			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7616		} else {
7617			netif_carrier_off(pf->vsi[i]->netdev);
7618			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7619		}
7620	}
7621}
7622
7623/**
7624 * ice_rebuild - rebuild after reset
7625 * @pf: PF to rebuild
7626 * @reset_type: type of reset
7627 *
7628 * Do not rebuild VF VSIs in this flow because that is already handled via
7629 * ice_reset_all_vfs(). This is because the requirements for resetting a VF
7630 * after a PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we
7631 * don't want to reset/rebuild all the VF VSIs twice.
7632 */
7633static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7634{
7635	struct ice_vsi *vsi = ice_get_main_vsi(pf);
7636	struct device *dev = ice_pf_to_dev(pf);
7637	struct ice_hw *hw = &pf->hw;
7638	bool dvm;
7639	int err;
7640
7641	if (test_bit(ICE_DOWN, pf->state))
7642		goto clear_recovery;
7643
7644	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7645
7646#define ICE_EMP_RESET_SLEEP_MS 5000
7647	if (reset_type == ICE_RESET_EMPR) {
7648		/* If an EMP reset has occurred, any previously pending flash
7649		 * update will have completed. We no longer know whether or
7650		 * not the NVM update EMP reset is restricted.
7651		 */
7652		pf->fw_emp_reset_disabled = false;
7653
7654		msleep(ICE_EMP_RESET_SLEEP_MS);
7655	}
7656
7657	err = ice_init_all_ctrlq(hw);
7658	if (err) {
7659		dev_err(dev, "control queues init failed %d\n", err);
7660		goto err_init_ctrlq;
7661	}
7662
7663	/* if DDP was previously loaded successfully */
7664	if (!ice_is_safe_mode(pf)) {
7665		/* reload the SW DB of filter tables */
7666		if (reset_type == ICE_RESET_PFR)
7667			ice_fill_blk_tbls(hw);
7668		else
7669			/* Reload DDP Package after CORER/GLOBR reset */
7670			ice_load_pkg(NULL, pf);
7671	}
7672
7673	err = ice_clear_pf_cfg(hw);
7674	if (err) {
7675		dev_err(dev, "clear PF configuration failed %d\n", err);
7676		goto err_init_ctrlq;
7677	}
7678
7679	ice_clear_pxe_mode(hw);
7680
7681	err = ice_init_nvm(hw);
7682	if (err) {
7683		dev_err(dev, "ice_init_nvm failed %d\n", err);
7684		goto err_init_ctrlq;
7685	}
7686
7687	err = ice_get_caps(hw);
7688	if (err) {
7689		dev_err(dev, "ice_get_caps failed %d\n", err);
7690		goto err_init_ctrlq;
7691	}
7692
7693	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7694	if (err) {
7695		dev_err(dev, "set_mac_cfg failed %d\n", err);
7696		goto err_init_ctrlq;
7697	}
7698
7699	dvm = ice_is_dvm_ena(hw);
7700
7701	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7702	if (err)
7703		goto err_init_ctrlq;
7704
7705	err = ice_sched_init_port(hw->port_info);
7706	if (err)
7707		goto err_sched_init_port;
7708
7709	/* start misc vector */
7710	err = ice_req_irq_msix_misc(pf);
7711	if (err) {
7712		dev_err(dev, "misc vector setup failed: %d\n", err);
7713		goto err_sched_init_port;
7714	}
7715
7716	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7717		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7718		if (!rd32(hw, PFQF_FD_SIZE)) {
7719			u16 unused, guar, b_effort;
7720
7721			guar = hw->func_caps.fd_fltr_guar;
7722			b_effort = hw->func_caps.fd_fltr_best_effort;
7723
7724			/* force guaranteed filter pool for PF */
7725			ice_alloc_fd_guar_item(hw, &unused, guar);
7726			/* force shared filter pool for PF */
7727			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7728		}
7729	}
7730
7731	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7732		ice_dcb_rebuild(pf);
7733
7734	/* If the PF previously had PTP enabled, PTP init needs to happen
7735	 * before the VSI rebuild; otherwise the PTP link status events
7736	 * will fail.
7737	 */
7738	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7739		ice_ptp_rebuild(pf, reset_type);
7740
7741	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7742		ice_gnss_init(pf);
7743
7744	/* rebuild PF VSI */
7745	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7746	if (err) {
7747		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7748		goto err_vsi_rebuild;
7749	}
7750
7751	if (reset_type == ICE_RESET_PFR) {
7752		err = ice_rebuild_channels(pf);
7753		if (err) {
7754			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7755				err);
7756			goto err_vsi_rebuild;
7757		}
7758	}
7759
7760	/* If Flow Director is active */
7761	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7762		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7763		if (err) {
7764			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7765			goto err_vsi_rebuild;
7766		}
7767
7768		/* replay HW Flow Director recipes */
7769		if (hw->fdir_prof)
7770			ice_fdir_replay_flows(hw);
7771
7772		/* replay Flow Director filters */
7773		ice_fdir_replay_fltrs(pf);
7774
7775		ice_rebuild_arfs(pf);
7776	}
7777
7778	if (vsi && vsi->netdev)
7779		netif_device_attach(vsi->netdev);
7780
7781	ice_update_pf_netdev_link(pf);
7782
7783	/* tell the firmware we are up */
7784	err = ice_send_version(pf);
7785	if (err) {
7786		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7787			err);
7788		goto err_vsi_rebuild;
7789	}
7790
7791	ice_replay_post(hw);
7792
7793	/* if we get here, reset flow is successful */
7794	clear_bit(ICE_RESET_FAILED, pf->state);
7795
7796	ice_plug_aux_dev(pf);
7797	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7798		ice_lag_rebuild(pf);
7799
7800	/* Restore timestamp mode settings after VSI rebuild */
7801	ice_ptp_restore_timestamp_mode(pf);
7802	return;
7803
7804err_vsi_rebuild:
7805err_sched_init_port:
7806	ice_sched_cleanup_all(hw);
7807err_init_ctrlq:
7808	ice_shutdown_all_ctrlq(hw, false);
7809	set_bit(ICE_RESET_FAILED, pf->state);
7810clear_recovery:
7811	/* set this bit in PF state to control service task scheduling */
7812	set_bit(ICE_NEEDS_RESTART, pf->state);
7813	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7814}
7815
7816/**
7817 * ice_change_mtu - NDO callback to change the MTU
7818 * @netdev: network interface device structure
7819 * @new_mtu: new value for maximum frame size
7820 *
7821 * Returns 0 on success, negative on failure
7822 */
7823int ice_change_mtu(struct net_device *netdev, int new_mtu)
7824{
7825	struct ice_netdev_priv *np = netdev_priv(netdev);
7826	struct ice_vsi *vsi = np->vsi;
7827	struct ice_pf *pf = vsi->back;
7828	struct bpf_prog *prog;
7829	u8 count = 0;
7830	int err = 0;
7831
7832	if (new_mtu == (int)netdev->mtu) {
7833		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7834		return 0;
7835	}
7836
7837	prog = vsi->xdp_prog;
7838	if (prog && !prog->aux->xdp_has_frags) {
7839		int frame_size = ice_max_xdp_frame_size(vsi);
7840
7841		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7842			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7843				   frame_size - ICE_ETH_PKT_HDR_PAD);
7844			return -EINVAL;
7845		}
7846	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7847		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7848			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7849				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7850			return -EINVAL;
7851		}
7852	}
7853
7854	/* if a reset is in progress, wait for some time for it to complete */
7855	do {
7856		if (ice_is_reset_in_progress(pf->state)) {
7857			count++;
7858			usleep_range(1000, 2000);
7859		} else {
7860			break;
7861		}
7862
7863	} while (count < 100);
7864
7865	if (count == 100) {
7866		netdev_err(netdev, "can't change MTU. Device is busy\n");
7867		return -EBUSY;
7868	}
7869
7870	WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
7871	err = ice_down_up(vsi);
7872	if (err)
7873		return err;
7874
7875	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7876	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7877
7878	return err;
7879}
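/* Editor's illustrative sketch, not part of upstream ice_main.c: the XDP
 * bound enforced above, restated as a helper. With a non-multi-buffer XDP
 * program attached, the largest acceptable MTU is the per-buffer frame
 * size minus the L2 header/padding overhead.
 */
static int __maybe_unused ice_example_max_xdp_mtu(struct ice_vsi *vsi)
{
	return ice_max_xdp_frame_size(vsi) - ICE_ETH_PKT_HDR_PAD;
}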
7880
7881/**
7882 * ice_eth_ioctl - Access the hwtstamp interface
7883 * @netdev: network interface device structure
7884 * @ifr: interface request data
7885 * @cmd: ioctl command
7886 */
7887static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7888{
7889	struct ice_netdev_priv *np = netdev_priv(netdev);
7890	struct ice_pf *pf = np->vsi->back;
7891
7892	switch (cmd) {
7893	case SIOCGHWTSTAMP:
7894		return ice_ptp_get_ts_config(pf, ifr);
7895	case SIOCSHWTSTAMP:
7896		return ice_ptp_set_ts_config(pf, ifr);
7897	default:
7898		return -EOPNOTSUPP;
7899	}
7900}
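/* Editor's illustrative sketch, not part of upstream ice_main.c: the ioctl
 * handler above is what a user program reaches with SIOC[GS]HWTSTAMP, e.g.
 * (interface name is illustrative):
 *
 *	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
 *				       .rx_filter = HWTSTAMP_FILTER_ALL };
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);
 */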
7901
7902/**
7903 * ice_aq_str - convert AQ err code to a string
7904 * @aq_err: the AQ error code to convert
7905 */
7906const char *ice_aq_str(enum ice_aq_err aq_err)
7907{
7908	switch (aq_err) {
7909	case ICE_AQ_RC_OK:
7910		return "OK";
7911	case ICE_AQ_RC_EPERM:
7912		return "ICE_AQ_RC_EPERM";
7913	case ICE_AQ_RC_ENOENT:
7914		return "ICE_AQ_RC_ENOENT";
7915	case ICE_AQ_RC_ENOMEM:
7916		return "ICE_AQ_RC_ENOMEM";
7917	case ICE_AQ_RC_EBUSY:
7918		return "ICE_AQ_RC_EBUSY";
7919	case ICE_AQ_RC_EEXIST:
7920		return "ICE_AQ_RC_EEXIST";
7921	case ICE_AQ_RC_EINVAL:
7922		return "ICE_AQ_RC_EINVAL";
7923	case ICE_AQ_RC_ENOSPC:
7924		return "ICE_AQ_RC_ENOSPC";
7925	case ICE_AQ_RC_ENOSYS:
7926		return "ICE_AQ_RC_ENOSYS";
7927	case ICE_AQ_RC_EMODE:
7928		return "ICE_AQ_RC_EMODE";
7929	case ICE_AQ_RC_ENOSEC:
7930		return "ICE_AQ_RC_ENOSEC";
7931	case ICE_AQ_RC_EBADSIG:
7932		return "ICE_AQ_RC_EBADSIG";
7933	case ICE_AQ_RC_ESVN:
7934		return "ICE_AQ_RC_ESVN";
7935	case ICE_AQ_RC_EBADMAN:
7936		return "ICE_AQ_RC_EBADMAN";
7937	case ICE_AQ_RC_EBADBUF:
7938		return "ICE_AQ_RC_EBADBUF";
7939	}
7940
7941	return "ICE_AQ_RC_UNKNOWN";
7942}
7943
7944/**
7945 * ice_set_rss_lut - Set RSS LUT
7946 * @vsi: Pointer to VSI structure
7947 * @lut: Lookup table
7948 * @lut_size: Lookup table size
7949 *
7950 * Returns 0 on success, negative on failure
7951 */
7952int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7953{
7954	struct ice_aq_get_set_rss_lut_params params = {};
7955	struct ice_hw *hw = &vsi->back->hw;
7956	int status;
7957
7958	if (!lut)
7959		return -EINVAL;
7960
7961	params.vsi_handle = vsi->idx;
7962	params.lut_size = lut_size;
7963	params.lut_type = vsi->rss_lut_type;
7964	params.lut = lut;
7965
7966	status = ice_aq_set_rss_lut(hw, &params);
7967	if (status)
7968		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7969			status, ice_aq_str(hw->adminq.sq_last_status));
7970
7971	return status;
7972}
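/* Editor's illustrative sketch, not part of upstream ice_main.c: a caller
 * of ice_set_rss_lut() typically fills an evenly spread table first, e.g.:
 */
static int __maybe_unused ice_example_spread_rss_lut(struct ice_vsi *vsi)
{
	u8 *lut;
	int err;
	u16 i;

	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* round-robin the LUT entries across the active Rx queues */
	for (i = 0; i < vsi->rss_table_size; i++)
		lut[i] = i % vsi->rss_size;

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}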
7973
7974/**
7975 * ice_set_rss_key - Set RSS key
7976 * @vsi: Pointer to the VSI structure
7977 * @seed: RSS hash seed
7978 *
7979 * Returns 0 on success, negative on failure
7980 */
7981int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7982{
7983	struct ice_hw *hw = &vsi->back->hw;
7984	int status;
7985
7986	if (!seed)
7987		return -EINVAL;
7988
7989	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7990	if (status)
7991		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7992			status, ice_aq_str(hw->adminq.sq_last_status));
7993
7994	return status;
7995}
7996
7997/**
7998 * ice_get_rss_lut - Get RSS LUT
7999 * @vsi: Pointer to VSI structure
8000 * @lut: Buffer to store the lookup table entries
8001 * @lut_size: Size of buffer to store the lookup table entries
8002 *
8003 * Returns 0 on success, negative on failure
8004 */
8005int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
8006{
8007	struct ice_aq_get_set_rss_lut_params params = {};
8008	struct ice_hw *hw = &vsi->back->hw;
8009	int status;
8010
8011	if (!lut)
8012		return -EINVAL;
8013
8014	params.vsi_handle = vsi->idx;
8015	params.lut_size = lut_size;
8016	params.lut_type = vsi->rss_lut_type;
8017	params.lut = lut;
8018
8019	status = ice_aq_get_rss_lut(hw, &params);
8020	if (status)
8021		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
8022			status, ice_aq_str(hw->adminq.sq_last_status));
8023
8024	return status;
8025}
8026
8027/**
8028 * ice_get_rss_key - Get RSS key
8029 * @vsi: Pointer to VSI structure
8030 * @seed: Buffer to store the key in
8031 *
8032 * Returns 0 on success, negative on failure
8033 */
8034int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
8035{
8036	struct ice_hw *hw = &vsi->back->hw;
8037	int status;
8038
8039	if (!seed)
8040		return -EINVAL;
8041
8042	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
8043	if (status)
8044		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
8045			status, ice_aq_str(hw->adminq.sq_last_status));
8046
8047	return status;
8048}
8049
8050/**
8051 * ice_set_rss_hfunc - Set RSS HASH function
8052 * @vsi: Pointer to VSI structure
8053 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
8054 *
8055 * Returns 0 on success, negative on failure
8056 */
8057int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
8058{
8059	struct ice_hw *hw = &vsi->back->hw;
8060	struct ice_vsi_ctx *ctx;
8061	bool symm;
8062	int err;
8063
8064	if (hfunc == vsi->rss_hfunc)
8065		return 0;
8066
8067	if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
8068	    hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
8069		return -EOPNOTSUPP;
8070
8071	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8072	if (!ctx)
8073		return -ENOMEM;
8074
8075	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
8076	ctx->info.q_opt_rss = vsi->info.q_opt_rss;
8077	ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
8078	ctx->info.q_opt_rss |=
8079		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
8080	ctx->info.q_opt_tc = vsi->info.q_opt_tc;
8081	ctx->info.q_opt_flags = vsi->info.q_opt_rss;
8082
8083	err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
8084	if (err) {
8085		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
8086			vsi->vsi_num, err);
8087	} else {
8088		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
8089		vsi->rss_hfunc = hfunc;
8090		netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
8091			    hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
8092			    "Symmetric " : "");
8093	}
8094	kfree(ctx);
8095	if (err)
8096		return err;
8097
8098	/* Fix the symmetry setting for all existing RSS configurations */
8099	symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
8100	return ice_set_rss_cfg_symm(hw, vsi, symm);
8101}
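/* Editor's note, not part of upstream ice_main.c: this path is reached from
 * the ethtool set_rxfh handler; with a recent ethtool the symmetric variant
 * is requested with something like (assumed syntax):
 *
 *	ethtool -X eth0 xfrm symmetric-xor
 */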
8102
8103/**
8104 * ice_bridge_getlink - Get the hardware bridge mode
8105 * @skb: skb buff
8106 * @pid: process ID
8107 * @seq: RTNL message seq
8108 * @dev: the netdev being configured
8109 * @filter_mask: filter mask passed in
8110 * @nlflags: netlink flags passed in
8111 *
8112 * Return the bridge mode (VEB/VEPA)
8113 */
8114static int
8115ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8116		   struct net_device *dev, u32 filter_mask, int nlflags)
8117{
8118	struct ice_netdev_priv *np = netdev_priv(dev);
8119	struct ice_vsi *vsi = np->vsi;
8120	struct ice_pf *pf = vsi->back;
8121	u16 bmode;
8122
8123	bmode = pf->first_sw->bridge_mode;
8124
8125	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
8126				       filter_mask, NULL);
8127}
8128
8129/**
8130 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8131 * @vsi: Pointer to VSI structure
8132 * @bmode: Hardware bridge mode (VEB/VEPA)
8133 *
8134 * Returns 0 on success, negative on failure
8135 */
8136static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
8137{
8138	struct ice_aqc_vsi_props *vsi_props;
8139	struct ice_hw *hw = &vsi->back->hw;
8140	struct ice_vsi_ctx *ctxt;
8141	int ret;
8142
8143	vsi_props = &vsi->info;
8144
8145	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
8146	if (!ctxt)
8147		return -ENOMEM;
8148
8149	ctxt->info = vsi->info;
8150
8151	if (bmode == BRIDGE_MODE_VEB)
8152		/* change from VEPA to VEB mode */
8153		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8154	else
8155		/* change from VEB to VEPA mode */
8156		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8157	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
8158
8159	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
8160	if (ret) {
8161		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
8162			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
8163		goto out;
8164	}
8165	/* Update sw flags for book keeping */
8166	vsi_props->sw_flags = ctxt->info.sw_flags;
8167
8168out:
8169	kfree(ctxt);
8170	return ret;
8171}
8172
8173/**
8174 * ice_bridge_setlink - Set the hardware bridge mode
8175 * @dev: the netdev being configured
8176 * @nlh: RTNL message
8177 * @flags: bridge setlink flags
8178 * @extack: netlink extended ack
8179 *
8180 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
8181 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
8182 * not already set) for all VSIs connected to this switch, and also updates the
8183 * unicast switch filter rules for the corresponding switch of the netdev.
8184 */
8185static int
8186ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8187		   u16 __always_unused flags,
8188		   struct netlink_ext_ack __always_unused *extack)
8189{
8190	struct ice_netdev_priv *np = netdev_priv(dev);
8191	struct ice_pf *pf = np->vsi->back;
8192	struct nlattr *attr, *br_spec;
8193	struct ice_hw *hw = &pf->hw;
8194	struct ice_sw *pf_sw;
8195	int rem, v, err = 0;
8196
8197	pf_sw = pf->first_sw;
8198	/* find the attribute in the netlink message */
8199	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8200	if (!br_spec)
8201		return -EINVAL;
8202
8203	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
8204		__u16 mode = nla_get_u16(attr);
8205
8206		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8207			return -EINVAL;
8208		/* Continue if bridge mode is not being flipped */
8209		if (mode == pf_sw->bridge_mode)
8210			continue;
8211		/* Iterates through the PF VSI list and update the loopback
8212		 * mode of the VSI
8213		 */
8214		ice_for_each_vsi(pf, v) {
8215			if (!pf->vsi[v])
8216				continue;
8217			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8218			if (err)
8219				return err;
8220		}
8221
8222		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8223		/* Update the unicast switch filter rules for the corresponding
8224		 * switch of the netdev
8225		 */
8226		err = ice_update_sw_rule_bridge_mode(hw);
8227		if (err) {
8228			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8229				   mode, err,
8230				   ice_aq_str(hw->adminq.sq_last_status));
8231			/* revert hw->evb_veb */
8232			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8233			return err;
8234		}
8235
8236		pf_sw->bridge_mode = mode;
8237	}
8238
8239	return 0;
8240}
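/* Editor's note, not part of upstream ice_main.c: the setlink handler above
 * is typically exercised from userspace with iproute2, e.g.:
 *
 *	bridge link set dev <pf-netdev> hwmode {veb|vepa}
 */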
8241
8242/**
8243 * ice_tx_timeout - Respond to a Tx Hang
8244 * @netdev: network interface device structure
8245 * @txqueue: Tx queue
8246 */
8247void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8248{
8249	struct ice_netdev_priv *np = netdev_priv(netdev);
8250	struct ice_tx_ring *tx_ring = NULL;
8251	struct ice_vsi *vsi = np->vsi;
8252	struct ice_pf *pf = vsi->back;
8253	u32 i;
8254
8255	pf->tx_timeout_count++;
8256
8257	/* Check if PFC is enabled for the TC to which the queue belongs.
8258	 * If yes, then the Tx timeout is not caused by a hung queue; there is
8259	 * no need to reset and rebuild.
8260	 */
8261	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8262		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8263			 txqueue);
8264		return;
8265	}
8266
8267	/* now that we have an index, find the tx_ring struct */
8268	ice_for_each_txq(vsi, i)
8269		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8270			if (txqueue == vsi->tx_rings[i]->q_index) {
8271				tx_ring = vsi->tx_rings[i];
8272				break;
8273			}
8274
8275	/* Reset recovery level if enough time has elapsed after last timeout.
8276	 * Also ensure no new reset action happens before next timeout period.
8277	 */
8278	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8279		pf->tx_timeout_recovery_level = 1;
8280	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8281				       netdev->watchdog_timeo)))
8282		return;
8283
8284	if (tx_ring) {
8285		struct ice_hw *hw = &pf->hw;
8286		u32 head, val = 0;
8287
8288		head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8289				 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8290		/* Read interrupt register */
8291		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8292
8293		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8294			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8295			    head, tx_ring->next_to_use, val);
8296	}
8297
8298	pf->tx_timeout_last_recovery = jiffies;
8299	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8300		    pf->tx_timeout_recovery_level, txqueue);
8301
8302	switch (pf->tx_timeout_recovery_level) {
8303	case 1:
8304		set_bit(ICE_PFR_REQ, pf->state);
8305		break;
8306	case 2:
8307		set_bit(ICE_CORER_REQ, pf->state);
8308		break;
8309	case 3:
8310		set_bit(ICE_GLOBR_REQ, pf->state);
8311		break;
8312	default:
8313		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8314		set_bit(ICE_DOWN, pf->state);
8315		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8316		set_bit(ICE_SERVICE_DIS, pf->state);
8317		break;
8318	}
8319
8320	ice_service_task_schedule(pf);
8321	pf->tx_timeout_recovery_level++;
8322}
8323
8324/**
8325 * ice_setup_tc_cls_flower - flower classifier offloads
8326 * @np: net device to configure
8327 * @filter_dev: device on which filter is added
8328 * @cls_flower: offload data
8329 */
8330static int
8331ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8332			struct net_device *filter_dev,
8333			struct flow_cls_offload *cls_flower)
8334{
8335	struct ice_vsi *vsi = np->vsi;
8336
8337	if (cls_flower->common.chain_index)
8338		return -EOPNOTSUPP;
8339
8340	switch (cls_flower->command) {
8341	case FLOW_CLS_REPLACE:
8342		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8343	case FLOW_CLS_DESTROY:
8344		return ice_del_cls_flower(vsi, cls_flower);
8345	default:
8346		return -EINVAL;
8347	}
8348}
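/* Editor's note, not part of upstream ice_main.c: a filter that lands in
 * the FLOW_CLS_REPLACE case above can be installed with tc flower, e.g.
 * steering a flow to ADQ traffic class 1 (interface name and match fields
 * are illustrative):
 *
 *	tc filter add dev eth0 protocol ip ingress prio 1 flower \
 *		dst_ip 192.168.1.10 ip_proto tcp dst_port 80 \
 *		skip_sw hw_tc 1
 */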
8349
8350/**
8351 * ice_setup_tc_block_cb - callback handler registered for TC block
8352 * @type: TC SETUP type
8353 * @type_data: TC flower offload data that contains user input
8354 * @cb_priv: netdev private data
8355 */
8356static int
8357ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
8358{
8359	struct ice_netdev_priv *np = cb_priv;
8360
8361	switch (type) {
8362	case TC_SETUP_CLSFLOWER:
8363		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8364					       type_data);
8365	default:
8366		return -EOPNOTSUPP;
8367	}
8368}
8369
8370/**
8371 * ice_validate_mqprio_qopt - Validate TCF input parameters
8372 * @vsi: Pointer to VSI
8373 * @mqprio_qopt: input parameters for mqprio queue configuration
8374 *
8375 * This function validates MQPRIO params, such as qcount (power of 2 wherever
8376 * needed), and makes sure the user doesn't specify a qcount or BW rate limit
8377 * for more TCs than "num_tc"
8378 */
8379static int
8380ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8381			 struct tc_mqprio_qopt_offload *mqprio_qopt)
8382{
8383	int non_power_of_2_qcount = 0;
8384	struct ice_pf *pf = vsi->back;
8385	int max_rss_q_cnt = 0;
8386	u64 sum_min_rate = 0;
8387	struct device *dev;
8388	int i, speed;
8389	u8 num_tc;
8390
8391	if (vsi->type != ICE_VSI_PF)
8392		return -EINVAL;
8393
8394	if (mqprio_qopt->qopt.offset[0] != 0 ||
8395	    mqprio_qopt->qopt.num_tc < 1 ||
8396	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8397		return -EINVAL;
8398
8399	dev = ice_pf_to_dev(pf);
8400	vsi->ch_rss_size = 0;
8401	num_tc = mqprio_qopt->qopt.num_tc;
8402	speed = ice_get_link_speed_kbps(vsi);
8403
8404	for (i = 0; num_tc; i++) {
8405		int qcount = mqprio_qopt->qopt.count[i];
8406		u64 max_rate, min_rate, rem;
8407
8408		if (!qcount)
8409			return -EINVAL;
8410
8411		if (is_power_of_2(qcount)) {
8412			if (non_power_of_2_qcount &&
8413			    qcount > non_power_of_2_qcount) {
8414				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8415					qcount, non_power_of_2_qcount);
8416				return -EINVAL;
8417			}
8418			if (qcount > max_rss_q_cnt)
8419				max_rss_q_cnt = qcount;
8420		} else {
8421			if (non_power_of_2_qcount &&
8422			    qcount != non_power_of_2_qcount) {
8423				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8424					qcount, non_power_of_2_qcount);
8425				return -EINVAL;
8426			}
8427			if (qcount < max_rss_q_cnt) {
8428				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8429					qcount, max_rss_q_cnt);
8430				return -EINVAL;
8431			}
8432			max_rss_q_cnt = qcount;
8433			non_power_of_2_qcount = qcount;
8434		}
8435
8436		/* The tc command takes input in K/M/Gbps or K/M/Gbit etc., but
8437		 * converts the bandwidth rate limit into Bytes/s when
8438		 * passing it down to the driver. So convert the input bandwidth
8439		 * from Bytes/s to Kbps.
8440		 */
8441		max_rate = mqprio_qopt->max_rate[i];
8442		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8443
8444		/* min_rate is minimum guaranteed rate and it can't be zero */
8445		min_rate = mqprio_qopt->min_rate[i];
8446		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8447		sum_min_rate += min_rate;
8448
8449		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8450			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8451				min_rate, ICE_MIN_BW_LIMIT);
8452			return -EINVAL;
8453		}
8454
8455		if (max_rate && max_rate > speed) {
8456			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8457				i, max_rate, speed);
8458			return -EINVAL;
8459		}
8460
8461		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8462		if (rem) {
8463			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8464				i, ICE_MIN_BW_LIMIT);
8465			return -EINVAL;
8466		}
8467
8468		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8469		if (rem) {
8470			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8471				i, ICE_MIN_BW_LIMIT);
8472			return -EINVAL;
8473		}
8474
8475		/* min_rate can't be more than max_rate, except when max_rate
8476		 * is zero (implies max_rate sought is max line rate). In such
8477		 * a case min_rate can be more than max.
8478		 */
8479		if (max_rate && min_rate > max_rate) {
8480			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8481				min_rate, max_rate);
8482			return -EINVAL;
8483		}
8484
8485		if (i >= mqprio_qopt->qopt.num_tc - 1)
8486			break;
8487		if (mqprio_qopt->qopt.offset[i + 1] !=
8488		    (mqprio_qopt->qopt.offset[i] + qcount))
8489			return -EINVAL;
8490	}
8491	if (vsi->num_rxq <
8492	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8493		return -EINVAL;
8494	if (vsi->num_txq <
8495	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8496		return -EINVAL;
8497
8498	if (sum_min_rate && sum_min_rate > (u64)speed) {
8499		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8500			sum_min_rate, speed);
8501		return -EINVAL;
8502	}
8503
8504	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8505	vsi->ch_rss_size = max_rss_q_cnt;
8506
8507	return 0;
8508}
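/* Editor's note, not part of upstream ice_main.c: the parameters validated
 * above originate from an mqprio channel-mode request such as (interface
 * name, queue counts and rates are illustrative):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *		hw 1 mode channel shaper bw_rlimit \
 *		min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit
 */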
8509
8510/**
8511 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8512 * @pf: ptr to PF device
8513 * @vsi: ptr to VSI
8514 */
8515static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8516{
8517	struct device *dev = ice_pf_to_dev(pf);
8518	bool added = false;
8519	struct ice_hw *hw;
8520	int flow;
8521
8522	if (!(vsi->num_gfltr || vsi->num_bfltr))
8523		return -EINVAL;
8524
8525	hw = &pf->hw;
8526	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8527		struct ice_fd_hw_prof *prof;
8528		int tun, status;
8529		u64 entry_h;
8530
8531		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8532		      hw->fdir_prof[flow]->cnt))
8533			continue;
8534
8535		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8536			enum ice_flow_priority prio;
8537
8538			/* add this VSI to FDir profile for this flow */
8539			prio = ICE_FLOW_PRIO_NORMAL;
8540			prof = hw->fdir_prof[flow];
8541			status = ice_flow_add_entry(hw, ICE_BLK_FD,
8542						    prof->prof_id[tun],
8543						    prof->vsi_h[0], vsi->idx,
8544						    prio, prof->fdir_seg[tun],
8545						    &entry_h);
8546			if (status) {
8547				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8548					vsi->idx, flow);
8549				continue;
8550			}
8551
8552			prof->entry_h[prof->cnt][tun] = entry_h;
8553		}
8554
8555		/* store VSI for filter replay and delete */
8556		prof->vsi_h[prof->cnt] = vsi->idx;
8557		prof->cnt++;
8558
8559		added = true;
8560		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8561			flow);
8562	}
8563
8564	if (!added)
8565		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8566
8567	return 0;
8568}
8569
8570/**
8571 * ice_add_channel - add a channel by adding VSI
8572 * @pf: ptr to PF device
8573 * @sw_id: underlying HW switching element ID
8574 * @ch: ptr to channel structure
8575 *
8576 * Add a channel (VSI) using add_vsi and queue_map
8577 */
8578static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8579{
8580	struct device *dev = ice_pf_to_dev(pf);
8581	struct ice_vsi *vsi;
8582
8583	if (ch->type != ICE_VSI_CHNL) {
8584		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8585		return -EINVAL;
8586	}
8587
8588	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8589	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8590		dev_err(dev, "create chnl VSI failure\n");
8591		return -EINVAL;
8592	}
8593
8594	ice_add_vsi_to_fdir(pf, vsi);
8595
8596	ch->sw_id = sw_id;
8597	ch->vsi_num = vsi->vsi_num;
8598	ch->info.mapping_flags = vsi->info.mapping_flags;
8599	ch->ch_vsi = vsi;
8600	/* set the back pointer of channel for newly created VSI */
8601	vsi->ch = ch;
8602
8603	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8604	       sizeof(vsi->info.q_mapping));
8605	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8606	       sizeof(vsi->info.tc_mapping));
8607
8608	return 0;
8609}
8610
8611/**
8612 * ice_chnl_cfg_res - configure channel-specific resources
8613 * @vsi: the VSI being setup
8614 * @ch: ptr to channel structure
8615 *
8616 * Configure channel-specific resources such as rings and vectors.
8617 */
8618static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8619{
8620	int i;
8621
8622	for (i = 0; i < ch->num_txq; i++) {
8623		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8624		struct ice_ring_container *rc;
8625		struct ice_tx_ring *tx_ring;
8626		struct ice_rx_ring *rx_ring;
8627
8628		tx_ring = vsi->tx_rings[ch->base_q + i];
8629		rx_ring = vsi->rx_rings[ch->base_q + i];
8630		if (!tx_ring || !rx_ring)
8631			continue;
8632
8633		/* mark the rings as channel enabled */
8634		tx_ring->ch = ch;
8635		rx_ring->ch = ch;
8636
8637		/* following code block sets up vector specific attributes */
8638		tx_q_vector = tx_ring->q_vector;
8639		rx_q_vector = rx_ring->q_vector;
8640		if (!tx_q_vector && !rx_q_vector)
8641			continue;
8642
8643		if (tx_q_vector) {
8644			tx_q_vector->ch = ch;
8645			/* setup the Tx ITR setting if DIM is off */
8646			rc = &tx_q_vector->tx;
8647			if (!ITR_IS_DYNAMIC(rc))
8648				ice_write_itr(rc, rc->itr_setting);
8649		}
8650		if (rx_q_vector) {
8651			rx_q_vector->ch = ch;
8652			/* setup the Rx ITR setting if DIM is off */
8653			rc = &rx_q_vector->rx;
8654			if (!ITR_IS_DYNAMIC(rc))
8655				ice_write_itr(rc, rc->itr_setting);
8656		}
8657	}
8658
8659	/* it is safe to assume that, if the channel has a non-zero num_txq or
8660	 * num_rxq, then the GLINT_ITR register will have been written to
8661	 * perform an in-context update, hence perform the flush
8662	 */
8663	if (ch->num_txq || ch->num_rxq)
8664		ice_flush(&vsi->back->hw);
8665}
8666
8667/**
8668 * ice_cfg_chnl_all_res - configure channel resources
8669 * @vsi: ptr to main VSI
8670 * @ch: ptr to channel structure
8671 *
8672 * This function configures channel specific resources such as flow-director
8673 * counter index, and other resources such as queues, vectors, ITR settings
8674 */
8675static void
8676ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8677{
8678	/* configure channel (aka ADQ) resources such as queues, vectors,
8679	 * ITR settings for channel specific vectors and anything else
8680	 */
8681	ice_chnl_cfg_res(vsi, ch);
8682}
8683
8684/**
8685 * ice_setup_hw_channel - setup new channel
8686 * @pf: ptr to PF device
8687 * @vsi: the VSI being setup
8688 * @ch: ptr to channel structure
8689 * @sw_id: underlying HW switching element ID
8690 * @type: type of channel to be created (VMDq2/VF)
8691 *
8692 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8693 * and configures Tx rings accordingly
8694 */
8695static int
8696ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8697		     struct ice_channel *ch, u16 sw_id, u8 type)
8698{
8699	struct device *dev = ice_pf_to_dev(pf);
8700	int ret;
8701
8702	ch->base_q = vsi->next_base_q;
8703	ch->type = type;
8704
8705	ret = ice_add_channel(pf, sw_id, ch);
8706	if (ret) {
8707		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8708		return ret;
8709	}
8710
8711	/* configure/setup ADQ specific resources */
8712	ice_cfg_chnl_all_res(vsi, ch);
8713
8714	/* make sure to update the next_base_q so that subsequent channels'
8715	 * (aka ADQ) VSI queue maps are correct
8716	 */
8717	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8718	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8719		ch->num_rxq);
8720
8721	return 0;
8722}
8723
8724/**
8725 * ice_setup_channel - setup new channel using uplink element
8726 * @pf: ptr to PF device
8727 * @vsi: the VSI being setup
8728 * @ch: ptr to channel structure
8729 *
8730 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8731 * and uplink switching element
8732 */
8733static bool
8734ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8735		  struct ice_channel *ch)
8736{
8737	struct device *dev = ice_pf_to_dev(pf);
8738	u16 sw_id;
8739	int ret;
8740
8741	if (vsi->type != ICE_VSI_PF) {
8742		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8743		return false;
8744	}
8745
8746	sw_id = pf->first_sw->sw_id;
8747
8748	/* create channel (VSI) */
8749	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8750	if (ret) {
8751		dev_err(dev, "failed to setup hw_channel\n");
8752		return false;
8753	}
8754	dev_dbg(dev, "successfully created channel()\n");
8755
8756	return ch->ch_vsi ? true : false;
8757}
8758
8759/**
8760 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8761 * @vsi: VSI to be configured
8762 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8763 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8764 */
8765static int
8766ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8767{
8768	int err;
8769
8770	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8771	if (err)
8772		return err;
8773
8774	return ice_set_max_bw_limit(vsi, max_tx_rate);
8775}
8776
8777/**
8778 * ice_create_q_channel - function to create channel
8779 * @vsi: VSI to be configured
8780 * @ch: ptr to channel (it contains channel specific params)
8781 *
8782 * This function creates a channel (VSI) using the num_queues specified by
8783 * the user, and reconfigures RSS if needed.
8784 */
8785static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8786{
8787	struct ice_pf *pf = vsi->back;
8788	struct device *dev;
8789
8790	if (!ch)
8791		return -EINVAL;
8792
8793	dev = ice_pf_to_dev(pf);
8794	if (!ch->num_txq || !ch->num_rxq) {
8795		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8796		return -EINVAL;
8797	}
8798
8799	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8800		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8801			vsi->cnt_q_avail, ch->num_txq);
8802		return -EINVAL;
8803	}
8804
8805	if (!ice_setup_channel(pf, vsi, ch)) {
8806		dev_info(dev, "Failed to setup channel\n");
8807		return -EINVAL;
8808	}
8809	/* configure BW rate limit */
8810	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8811		int ret;
8812
8813		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8814				       ch->min_tx_rate);
8815		if (ret)
8816			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8817				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8818		else
8819			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8820				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8821	}
8822
8823	vsi->cnt_q_avail -= ch->num_txq;
8824
8825	return 0;
8826}
8827
8828/**
8829 * ice_rem_all_chnl_fltrs - removes all channel filters
8830 * @pf: ptr to PF; TC-flower based filters are tracked at PF level
8831 *
8832 * Remove all advanced switch filters only if they are channel-specific
8833 * tc-flower based filters
8834 */
8835static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8836{
8837	struct ice_tc_flower_fltr *fltr;
8838	struct hlist_node *node;
8839
8840	/* to remove all channel filters, iterate an ordered list of filters */
8841	hlist_for_each_entry_safe(fltr, node,
8842				  &pf->tc_flower_fltr_list,
8843				  tc_flower_node) {
8844		struct ice_rule_query_data rule;
8845		int status;
8846
8847		/* for now process only channel specific filters */
8848		if (!ice_is_chnl_fltr(fltr))
8849			continue;
8850
8851		rule.rid = fltr->rid;
8852		rule.rule_id = fltr->rule_id;
8853		rule.vsi_handle = fltr->dest_vsi_handle;
8854		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8855		if (status) {
8856			if (status == -ENOENT)
8857				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8858					rule.rule_id);
8859			else
8860				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8861					status);
8862		} else if (fltr->dest_vsi) {
8863			/* update advanced switch filter count */
8864			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8865				u32 flags = fltr->flags;
8866
8867				fltr->dest_vsi->num_chnl_fltr--;
8868				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8869					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8870					pf->num_dmac_chnl_fltrs--;
8871			}
8872		}
8873
8874		hlist_del(&fltr->tc_flower_node);
8875		kfree(fltr);
8876	}
8877}
8878
8879/**
8880 * ice_remove_q_channels - Remove queue channels for the TCs
8881 * @vsi: VSI to be configured
8882 * @rem_fltr: delete advanced switch filter or not
8883 *
8884 * Remove queue channels for the TCs
8885 */
8886static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8887{
8888	struct ice_channel *ch, *ch_tmp;
8889	struct ice_pf *pf = vsi->back;
8890	int i;
8891
8892	/* remove all tc-flower based filters if they are channel filters only */
8893	if (rem_fltr)
8894		ice_rem_all_chnl_fltrs(pf);
8895
8896	/* remove ntuple filters since queue configuration is being changed */
8897	if (vsi->netdev->features & NETIF_F_NTUPLE) {
8898		struct ice_hw *hw = &pf->hw;
8899
8900		mutex_lock(&hw->fdir_fltr_lock);
8901		ice_fdir_del_all_fltrs(vsi);
8902		mutex_unlock(&hw->fdir_fltr_lock);
8903	}
8904
8905	/* perform cleanup for channels if they exist */
8906	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8907		struct ice_vsi *ch_vsi;
8908
8909		list_del(&ch->list);
8910		ch_vsi = ch->ch_vsi;
8911		if (!ch_vsi) {
8912			kfree(ch);
8913			continue;
8914		}
8915
8916		/* Reset queue contexts */
8917		for (i = 0; i < ch->num_rxq; i++) {
8918			struct ice_tx_ring *tx_ring;
8919			struct ice_rx_ring *rx_ring;
8920
8921			tx_ring = vsi->tx_rings[ch->base_q + i];
8922			rx_ring = vsi->rx_rings[ch->base_q + i];
8923			if (tx_ring) {
8924				tx_ring->ch = NULL;
8925				if (tx_ring->q_vector)
8926					tx_ring->q_vector->ch = NULL;
8927			}
8928			if (rx_ring) {
8929				rx_ring->ch = NULL;
8930				if (rx_ring->q_vector)
8931					rx_ring->q_vector->ch = NULL;
8932			}
8933		}
8934
8935		/* Release FD resources for the channel VSI */
8936		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8937
8938		/* clear the VSI from scheduler tree */
8939		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8940
8941		/* Delete VSI from FW, PF and HW VSI arrays */
8942		ice_vsi_delete(ch->ch_vsi);
8943
8944		/* free the channel */
8945		kfree(ch);
8946	}
8947
8948	/* clear the channel VSI map which is stored in main VSI */
8949	ice_for_each_chnl_tc(i)
8950		vsi->tc_map_vsi[i] = NULL;
8951
8952	/* reset main VSI's all TC information */
8953	vsi->all_enatc = 0;
8954	vsi->all_numtc = 0;
8955}
8956
8957/**
8958 * ice_rebuild_channels - rebuild channel
8959 * @pf: ptr to PF
8960 *
8961 * Recreate channel VSIs and replay filters
8962 */
8963static int ice_rebuild_channels(struct ice_pf *pf)
8964{
8965	struct device *dev = ice_pf_to_dev(pf);
8966	struct ice_vsi *main_vsi;
8967	bool rem_adv_fltr = true;
8968	struct ice_channel *ch;
8969	struct ice_vsi *vsi;
8970	int tc_idx = 1;
8971	int i, err;
8972
8973	main_vsi = ice_get_main_vsi(pf);
8974	if (!main_vsi)
8975		return 0;
8976
8977	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8978	    main_vsi->old_numtc == 1)
8979		return 0; /* nothing to be done */
8980
8981	/* reconfigure main VSI based on old value of TC and cached values
8982	 * for MQPRIO opts
8983	 */
8984	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8985	if (err) {
8986		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8987			main_vsi->old_ena_tc, main_vsi->vsi_num);
8988		return err;
8989	}
8990
8991	/* rebuild ADQ VSIs */
8992	ice_for_each_vsi(pf, i) {
8993		enum ice_vsi_type type;
8994
8995		vsi = pf->vsi[i];
8996		if (!vsi || vsi->type != ICE_VSI_CHNL)
8997			continue;
8998
8999		type = vsi->type;
9000
9001		/* rebuild ADQ VSI */
9002		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
9003		if (err) {
9004			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
9005				ice_vsi_type_str(type), vsi->idx, err);
9006			goto cleanup;
9007		}
9008
9009		/* Re-map the HW VSI number, using the VSI handle that is
9010		 * validated by the ice_replay_vsi() call below
9011		 */
9012		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
9013
9014		/* replay filters for the VSI */
9015		err = ice_replay_vsi(&pf->hw, vsi->idx);
9016		if (err) {
9017			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
9018				ice_vsi_type_str(type), err, vsi->idx);
9019			rem_adv_fltr = false;
9020			goto cleanup;
9021		}
9022		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
9023			 ice_vsi_type_str(type), vsi->idx);
9024
9025		/* store ADQ VSI at correct TC index in main VSI's
9026		 * map of TC to VSI
9027		 */
9028		main_vsi->tc_map_vsi[tc_idx++] = vsi;
9029	}
9030
9031	/* ADQ VSI(s) have been rebuilt successfully, so set up the
9032	 * channels for the main VSI's Tx and Rx rings
9033	 */
9034	list_for_each_entry(ch, &main_vsi->ch_list, list) {
9035		struct ice_vsi *ch_vsi;
9036
9037		ch_vsi = ch->ch_vsi;
9038		if (!ch_vsi)
9039			continue;
9040
9041		/* reconfig channel resources */
9042		ice_cfg_chnl_all_res(main_vsi, ch);
9043
9044		/* replay BW rate limit if it is non-zero */
9045		if (!ch->max_tx_rate && !ch->min_tx_rate)
9046			continue;
9047
9048		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
9049				       ch->min_tx_rate);
9050		if (err)
9051			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9052				err, ch->max_tx_rate, ch->min_tx_rate,
9053				ch_vsi->vsi_num);
9054		else
9055			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9056				ch->max_tx_rate, ch->min_tx_rate,
9057				ch_vsi->vsi_num);
9058	}
9059
9060	/* reconfig RSS for main VSI */
9061	if (main_vsi->ch_rss_size)
9062		ice_vsi_cfg_rss_lut_key(main_vsi);
9063
9064	return 0;
9065
9066cleanup:
9067	ice_remove_q_channels(main_vsi, rem_adv_fltr);
9068	return err;
9069}
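/* Note on the rebuild ordering above: the main VSI's TC configuration is
 * restored first, each ICE_VSI_CHNL VSI is then rebuilt and its filters
 * replayed, and only afterwards are the channel queues and BW limits
 * reattached; a failure at any step unwinds via ice_remove_q_channels().
 */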
9070
9071/**
9072 * ice_create_q_channels - Add queue channel for the given TCs
9073 * @vsi: VSI to be configured
9074 *
9075 * Configures queue channel mapping to the given TCs
9076 */
9077static int ice_create_q_channels(struct ice_vsi *vsi)
9078{
9079	struct ice_pf *pf = vsi->back;
9080	struct ice_channel *ch;
9081	int ret = 0, i;
9082
9083	ice_for_each_chnl_tc(i) {
9084		if (!(vsi->all_enatc & BIT(i)))
9085			continue;
9086
9087		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
9088		if (!ch) {
9089			ret = -ENOMEM;
9090			goto err_free;
9091		}
9092		INIT_LIST_HEAD(&ch->list);
9093		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
9094		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
9095		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
9096		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
9097		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
9098
9099		/* convert to Kbits/s */
9100		if (ch->max_tx_rate)
9101			ch->max_tx_rate = div_u64(ch->max_tx_rate,
9102						  ICE_BW_KBPS_DIVISOR);
9103		if (ch->min_tx_rate)
9104			ch->min_tx_rate = div_u64(ch->min_tx_rate,
9105						  ICE_BW_KBPS_DIVISOR);
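		/* The stack supplies mqprio rates in bytes per second;
		 * dividing by ICE_BW_KBPS_DIVISOR (125, assuming the
		 * driver's usual definition) converts them to Kbit/s,
		 * since 1 byte/s = 8 bit/s = 1/125 Kbit/s. For example,
		 * a max_rate of 125000000 bytes/s becomes 1000000 Kbit/s,
		 * i.e. 1 Gbit/s.
		 */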
9106
9107		ret = ice_create_q_channel(vsi, ch);
9108		if (ret) {
9109			dev_err(ice_pf_to_dev(pf),
9110				"failed creating channel TC:%d\n", i);
9111			kfree(ch);
9112			goto err_free;
9113		}
9114		list_add_tail(&ch->list, &vsi->ch_list);
9115		vsi->tc_map_vsi[i] = ch->ch_vsi;
9116		dev_dbg(ice_pf_to_dev(pf),
9117			"successfully created channel: VSI %pK\n", ch->ch_vsi);
9118	}
9119	return 0;
9120
9121err_free:
9122	ice_remove_q_channels(vsi, false);
9123
9124	return ret;
9125}
9126
9127/**
9128 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9129 * @netdev: net device to configure
9130 * @type_data: TC offload data
9131 */
9132static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
9133{
9134	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9135	struct ice_netdev_priv *np = netdev_priv(netdev);
9136	struct ice_vsi *vsi = np->vsi;
9137	struct ice_pf *pf = vsi->back;
9138	u16 mode, ena_tc_qdisc = 0;
9139	int cur_txq, cur_rxq;
9140	u8 hw = 0, num_tcf;
9141	struct device *dev;
9142	int ret, i;
9143
9144	dev = ice_pf_to_dev(pf);
9145	num_tcf = mqprio_qopt->qopt.num_tc;
9146	hw = mqprio_qopt->qopt.hw;
9147	mode = mqprio_qopt->mode;
9148	if (!hw) {
9149		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9150		vsi->ch_rss_size = 0;
9151		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9152		goto config_tcf;
9153	}
9154
9155	/* Generate queue region map for number of TCF requested */
9156	for (i = 0; i < num_tcf; i++)
9157		ena_tc_qdisc |= BIT(i);
9158
9159	switch (mode) {
9160	case TC_MQPRIO_MODE_CHANNEL:
9161
9162		if (pf->hw.port_info->is_custom_tx_enabled) {
9163			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
9164			return -EBUSY;
9165		}
9166		ice_tear_down_devlink_rate_tree(pf);
9167
9168		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
9169		if (ret) {
9170			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
9171				   ret);
9172			return ret;
9173		}
9174		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9175		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9176		/* don't assume the state of hw_tc_offload during driver load;
9177		 * set the flag for TC flower filters if hw_tc_offload is
9178		 * already ON
9179		 */
9180		if (vsi->netdev->features & NETIF_F_HW_TC)
9181			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
9182		break;
9183	default:
9184		return -EINVAL;
9185	}
9186
9187config_tcf:
9188
9189	/* Requesting same TCF configuration as already enabled */
9190	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
9191	    mode != TC_MQPRIO_MODE_CHANNEL)
9192		return 0;
9193
9194	/* Pause VSI queues */
9195	ice_dis_vsi(vsi, true);
9196
9197	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9198		ice_remove_q_channels(vsi, true);
9199
9200	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9201		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9202				     num_online_cpus());
9203		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9204				     num_online_cpus());
9205	} else {
9206		/* logic to rebuild the VSI, same as ethtool -L */
9207		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9208
9209		for (i = 0; i < num_tcf; i++) {
9210			if (!(ena_tc_qdisc & BIT(i)))
9211				continue;
9212
9213			offset = vsi->mqprio_qopt.qopt.offset[i];
9214			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9215			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9216		}
9217		vsi->req_txq = offset + qcount_tx;
9218		vsi->req_rxq = offset + qcount_rx;
9219
9220		/* store away original rss_size info, so that it gets reused
9221		 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
9222		 * determine what the rss_size should be for the main VSI
9223		 */
9224		vsi->orig_rss_size = vsi->rss_size;
9225	}
9226
9227	/* save the current Tx and Rx queue counts before calling VSI
9228	 * rebuild, so they can serve as a fallback option
9229	 */
9230	cur_txq = vsi->num_txq;
9231	cur_rxq = vsi->num_rxq;
9232
9233	/* proceed with rebuilding the main VSI using the correct number of queues */
9234	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9235	if (ret) {
9236		/* fallback to current number of queues */
9237		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
9238		vsi->req_txq = cur_txq;
9239		vsi->req_rxq = cur_rxq;
9240		clear_bit(ICE_RESET_FAILED, pf->state);
9241		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9242			dev_err(dev, "Rebuild of main VSI failed again\n");
9243			return ret;
9244		}
9245	}
9246
9247	vsi->all_numtc = num_tcf;
9248	vsi->all_enatc = ena_tc_qdisc;
9249	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9250	if (ret) {
9251		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9252			   vsi->vsi_num);
9253		goto exit;
9254	}
9255
9256	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9257		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9258		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9259
9260		/* set TC0 rate limit if specified */
9261		if (max_tx_rate || min_tx_rate) {
9262			/* convert to Kbits/s */
9263			if (max_tx_rate)
9264				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9265			if (min_tx_rate)
9266				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
9267
9268			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9269			if (!ret) {
9270				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9271					max_tx_rate, min_tx_rate, vsi->vsi_num);
9272			} else {
9273				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9274					max_tx_rate, min_tx_rate, vsi->vsi_num);
9275				goto exit;
9276			}
9277		}
9278		ret = ice_create_q_channels(vsi);
9279		if (ret) {
9280			netdev_err(netdev, "failed configuring queue channels\n");
9281			goto exit;
9282		} else {
9283			netdev_dbg(netdev, "successfully configured channels\n");
9284		}
9285	}
9286
9287	if (vsi->ch_rss_size)
9288		ice_vsi_cfg_rss_lut_key(vsi);
9289
9290exit:
9291	/* on error, reset all_numtc and all_enatc */
9292	if (ret) {
9293		vsi->all_numtc = 0;
9294		vsi->all_enatc = 0;
9295	}
9296	/* resume VSI */
9297	ice_ena_vsi(vsi, true);
9298
9299	return ret;
9300}
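/* Illustrative usage, with a hypothetical interface name: the channel-mode
 * path above is exercised by an mqprio qdisc such as
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel \
 *      shaper bw_rlimit max_rate 4Gbit 5Gbit
 *
 * where "hw 1 mode channel" requests the ADQ offload handled here, and
 * "tc qdisc del dev eth0 root" takes the !hw path that removes the
 * channels again.
 */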
9301
9302static LIST_HEAD(ice_block_cb_list);
9303
9304static int
9305ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9306	     void *type_data)
9307{
9308	struct ice_netdev_priv *np = netdev_priv(netdev);
9309	struct ice_pf *pf = np->vsi->back;
9310	bool locked = false;
9311	int err;
9312
9313	switch (type) {
9314	case TC_SETUP_BLOCK:
9315		return flow_block_cb_setup_simple(type_data,
9316						  &ice_block_cb_list,
9317						  ice_setup_tc_block_cb,
9318						  np, np, true);
9319	case TC_SETUP_QDISC_MQPRIO:
9320		if (ice_is_eswitch_mode_switchdev(pf)) {
9321			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9322			return -EOPNOTSUPP;
9323		}
9324
9325		if (pf->adev) {
9326			mutex_lock(&pf->adev_mutex);
9327			device_lock(&pf->adev->dev);
9328			locked = true;
9329			if (pf->adev->dev.driver) {
9330				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9331				err = -EBUSY;
9332				goto adev_unlock;
9333			}
9334		}
9335
9336		/* set up the traffic classifier for the receive side */
9337		mutex_lock(&pf->tc_mutex);
9338		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9339		mutex_unlock(&pf->tc_mutex);
9340
9341adev_unlock:
9342		if (locked) {
9343			device_unlock(&pf->adev->dev);
9344			mutex_unlock(&pf->adev_mutex);
9345		}
9346		return err;
9347	default:
9348		return -EOPNOTSUPP;
9349	}
9350	return -EOPNOTSUPP;
9351}
9352
9353static struct ice_indr_block_priv *
9354ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9355			   struct net_device *netdev)
9356{
9357	struct ice_indr_block_priv *cb_priv;
9358
9359	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9360		if (!cb_priv->netdev)
9361			return NULL;
9362		if (cb_priv->netdev == netdev)
9363			return cb_priv;
9364	}
9365	return NULL;
9366}
9367
9368static int
9369ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9370			void *indr_priv)
9371{
9372	struct ice_indr_block_priv *priv = indr_priv;
9373	struct ice_netdev_priv *np = priv->np;
9374
9375	switch (type) {
9376	case TC_SETUP_CLSFLOWER:
9377		return ice_setup_tc_cls_flower(np, priv->netdev,
9378					       (struct flow_cls_offload *)
9379					       type_data);
9380	default:
9381		return -EOPNOTSUPP;
9382	}
9383}
9384
9385static int
9386ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9387			struct ice_netdev_priv *np,
9388			struct flow_block_offload *f, void *data,
9389			void (*cleanup)(struct flow_block_cb *block_cb))
9390{
9391	struct ice_indr_block_priv *indr_priv;
9392	struct flow_block_cb *block_cb;
9393
9394	if (!ice_is_tunnel_supported(netdev) &&
9395	    !(is_vlan_dev(netdev) &&
9396	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
9397		return -EOPNOTSUPP;
9398
9399	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9400		return -EOPNOTSUPP;
9401
9402	switch (f->command) {
9403	case FLOW_BLOCK_BIND:
9404		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9405		if (indr_priv)
9406			return -EEXIST;
9407
9408		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9409		if (!indr_priv)
9410			return -ENOMEM;
9411
9412		indr_priv->netdev = netdev;
9413		indr_priv->np = np;
9414		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9415
9416		block_cb =
9417			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9418						 indr_priv, indr_priv,
9419						 ice_rep_indr_tc_block_unbind,
9420						 f, netdev, sch, data, np,
9421						 cleanup);
9422
9423		if (IS_ERR(block_cb)) {
9424			list_del(&indr_priv->list);
9425			kfree(indr_priv);
9426			return PTR_ERR(block_cb);
9427		}
9428		flow_block_cb_add(block_cb, f);
9429		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9430		break;
9431	case FLOW_BLOCK_UNBIND:
9432		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9433		if (!indr_priv)
9434			return -ENOENT;
9435
9436		block_cb = flow_block_cb_lookup(f->block,
9437						ice_indr_setup_block_cb,
9438						indr_priv);
9439		if (!block_cb)
9440			return -ENOENT;
9441
9442		flow_indr_block_cb_remove(block_cb, f);
9443
9444		list_del(&block_cb->driver_list);
9445		break;
9446	default:
9447		return -EOPNOTSUPP;
9448	}
9449	return 0;
9450}
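/* Illustrative usage, with hypothetical device names: the indirect block
 * path above is what allows flower filters installed on a tunnel or VLAN
 * upper device to be offloaded by this driver, e.g.
 *
 *   tc filter add dev vxlan0 ingress protocol ip flower \
 *      dst_ip 192.168.1.1 action drop
 *
 * FLOW_BLOCK_BIND allocates the per-netdev ice_indr_block_priv and
 * registers ice_indr_setup_block_cb; FLOW_BLOCK_UNBIND tears both down.
 */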
9451
9452static int
9453ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9454		     void *cb_priv, enum tc_setup_type type, void *type_data,
9455		     void *data,
9456		     void (*cleanup)(struct flow_block_cb *block_cb))
9457{
9458	switch (type) {
9459	case TC_SETUP_BLOCK:
9460		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9461					       data, cleanup);
9462
9463	default:
9464		return -EOPNOTSUPP;
9465	}
9466}
9467
9468/**
9469 * ice_open - Called when a network interface becomes active
9470 * @netdev: network interface device structure
9471 *
9472 * The open entry point is called when a network interface is made
9473 * active by the system (IFF_UP). At this point all resources needed
9474 * for transmit and receive operations are allocated, the interrupt
9475 * handler is registered with the OS, the netdev watchdog is enabled,
9476 * and the stack is notified that the interface is ready.
9477 *
9478 * Returns 0 on success, negative value on failure
9479 */
9480int ice_open(struct net_device *netdev)
9481{
9482	struct ice_netdev_priv *np = netdev_priv(netdev);
9483	struct ice_pf *pf = np->vsi->back;
9484
9485	if (ice_is_reset_in_progress(pf->state)) {
9486		netdev_err(netdev, "can't open net device while reset is in progress\n");
9487		return -EBUSY;
9488	}
9489
9490	return ice_open_internal(netdev);
9491}
9492
9493/**
9494 * ice_open_internal - Called when a network interface becomes active
9495 * @netdev: network interface device structure
9496 *
9497 * Internal ice_open implementation. Should not be called directly except
9498 * by ice_open and the reset handling routine.
9499 *
9500 * Returns 0 on success, negative value on failure
9501 */
9502int ice_open_internal(struct net_device *netdev)
9503{
9504	struct ice_netdev_priv *np = netdev_priv(netdev);
9505	struct ice_vsi *vsi = np->vsi;
9506	struct ice_pf *pf = vsi->back;
9507	struct ice_port_info *pi;
9508	int err;
9509
9510	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9511		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9512		return -EIO;
9513	}
9514
9515	netif_carrier_off(netdev);
9516
9517	pi = vsi->port_info;
9518	err = ice_update_link_info(pi);
9519	if (err) {
9520		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9521		return err;
9522	}
9523
9524	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9525
9526	/* Set PHY if there is media, otherwise, turn off PHY */
9527	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9528		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9529		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9530			err = ice_init_phy_user_cfg(pi);
9531			if (err) {
9532				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9533					   err);
9534				return err;
9535			}
9536		}
9537
9538		err = ice_configure_phy(vsi);
9539		if (err) {
9540			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9541				   err);
9542			return err;
9543		}
9544	} else {
9545		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9546		ice_set_link(vsi, false);
9547	}
9548
9549	err = ice_vsi_open(vsi);
9550	if (err)
9551		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9552			   vsi->vsi_num, vsi->vsw->sw_id);
9553
9554	/* Update existing tunnel information */
9555	udp_tunnel_get_rx_info(netdev);
9556
9557	return err;
9558}
9559
9560/**
9561 * ice_stop - Disables a network interface
9562 * @netdev: network interface device structure
9563 *
9564 * The stop entry point is called when an interface is de-activated by the OS,
9565 * and the netdevice enters the DOWN state. The hardware is still under the
9566 * driver's control, but the netdev interface is disabled.
9567 *
9568 * Returns success only - not allowed to fail
9569 */
9570int ice_stop(struct net_device *netdev)
9571{
9572	struct ice_netdev_priv *np = netdev_priv(netdev);
9573	struct ice_vsi *vsi = np->vsi;
9574	struct ice_pf *pf = vsi->back;
9575
9576	if (ice_is_reset_in_progress(pf->state)) {
9577		netdev_err(netdev, "can't stop net device while reset is in progress\n");
9578		return -EBUSY;
9579	}
9580
9581	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9582		int link_err = ice_force_phys_link_state(vsi, false);
9583
9584		if (link_err) {
9585			if (link_err == -ENOMEDIUM)
9586				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9587					    vsi->vsi_num);
9588			else
9589				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9590					   vsi->vsi_num, link_err);
9591
9592			ice_vsi_close(vsi);
9593			return -EIO;
9594		}
9595	}
9596
9597	ice_vsi_close(vsi);
9598
9599	return 0;
9600}
9601
9602/**
9603 * ice_features_check - Validate encapsulated packet conforms to limits
9604 * @skb: skb buffer
9605 * @netdev: This port's netdev
9606 * @features: Offload features that the stack believes apply
9607 */
9608static netdev_features_t
9609ice_features_check(struct sk_buff *skb,
9610		   struct net_device __always_unused *netdev,
9611		   netdev_features_t features)
9612{
9613	bool gso = skb_is_gso(skb);
9614	size_t len;
9615
9616	/* No point in doing any of this if neither checksum nor GSO are
9617	 * being requested for this frame. We can rule out both by just
9618	 * checking for CHECKSUM_PARTIAL
9619	 */
9620	if (skb->ip_summed != CHECKSUM_PARTIAL)
9621		return features;
9622
9623	/* We cannot support GSO if the MSS is going to be less than
9624	 * 64 bytes. If it is then we need to drop support for GSO.
9625	 */
9626	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9627		features &= ~NETIF_F_GSO_MASK;
9628
9629	len = skb_network_offset(skb);
9630	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9631		goto out_rm_features;
9632
9633	len = skb_network_header_len(skb);
9634	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9635		goto out_rm_features;
9636
9637	if (skb->encapsulation) {
9638		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9639		 * the case of IPIP frames, the transport header pointer is
9640		 * after the inner header! So check to make sure that this
9641		 * is a GRE or UDP_TUNNEL frame before doing that math.
9642		 */
9643		if (gso && (skb_shinfo(skb)->gso_type &
9644			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9645			len = skb_inner_network_header(skb) -
9646			      skb_transport_header(skb);
9647			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9648				goto out_rm_features;
9649		}
9650
9651		len = skb_inner_network_header_len(skb);
9652		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9653			goto out_rm_features;
9654	}
9655
9656	return features;
9657out_rm_features:
9658	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9659}
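/* Worked example: an untagged IPv4 frame has a network offset of 14 bytes
 * and an IP header length of 20 bytes - both even and well within the
 * descriptor limits checked above - so every offload is kept. A frame whose
 * header lengths are odd or exceed ICE_TXD_MACLEN_MAX/ICE_TXD_IPLEN_MAX
 * falls through to out_rm_features instead, clearing the checksum and GSO
 * feature bits so the stack handles it in software.
 */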
9660
9661static const struct net_device_ops ice_netdev_safe_mode_ops = {
9662	.ndo_open = ice_open,
9663	.ndo_stop = ice_stop,
9664	.ndo_start_xmit = ice_start_xmit,
9665	.ndo_set_mac_address = ice_set_mac_address,
9666	.ndo_validate_addr = eth_validate_addr,
9667	.ndo_change_mtu = ice_change_mtu,
9668	.ndo_get_stats64 = ice_get_stats64,
9669	.ndo_tx_timeout = ice_tx_timeout,
9670	.ndo_bpf = ice_xdp_safe_mode,
9671};
9672
9673static const struct net_device_ops ice_netdev_ops = {
9674	.ndo_open = ice_open,
9675	.ndo_stop = ice_stop,
9676	.ndo_start_xmit = ice_start_xmit,
9677	.ndo_select_queue = ice_select_queue,
9678	.ndo_features_check = ice_features_check,
9679	.ndo_fix_features = ice_fix_features,
9680	.ndo_set_rx_mode = ice_set_rx_mode,
9681	.ndo_set_mac_address = ice_set_mac_address,
9682	.ndo_validate_addr = eth_validate_addr,
9683	.ndo_change_mtu = ice_change_mtu,
9684	.ndo_get_stats64 = ice_get_stats64,
9685	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9686	.ndo_eth_ioctl = ice_eth_ioctl,
9687	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9688	.ndo_set_vf_mac = ice_set_vf_mac,
9689	.ndo_get_vf_config = ice_get_vf_cfg,
9690	.ndo_set_vf_trust = ice_set_vf_trust,
9691	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9692	.ndo_set_vf_link_state = ice_set_vf_link_state,
9693	.ndo_get_vf_stats = ice_get_vf_stats,
9694	.ndo_set_vf_rate = ice_set_vf_bw,
9695	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9696	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9697	.ndo_setup_tc = ice_setup_tc,
9698	.ndo_set_features = ice_set_features,
9699	.ndo_bridge_getlink = ice_bridge_getlink,
9700	.ndo_bridge_setlink = ice_bridge_setlink,
9701	.ndo_fdb_add = ice_fdb_add,
9702	.ndo_fdb_del = ice_fdb_del,
9703#ifdef CONFIG_RFS_ACCEL
9704	.ndo_rx_flow_steer = ice_rx_flow_steer,
9705#endif
9706	.ndo_tx_timeout = ice_tx_timeout,
9707	.ndo_bpf = ice_xdp,
9708	.ndo_xdp_xmit = ice_xdp_xmit,
9709	.ndo_xsk_wakeup = ice_xsk_wakeup,
9710};
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4/* Intel(R) Ethernet Connection E800 Series Linux Driver */
   5
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7
   8#include <generated/utsrelease.h>
 
   9#include "ice.h"
  10#include "ice_base.h"
  11#include "ice_lib.h"
  12#include "ice_fltr.h"
  13#include "ice_dcb_lib.h"
  14#include "ice_dcb_nl.h"
  15#include "ice_devlink.h"
 
 
 
  16/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
  17 * ice tracepoint functions. This must be done exactly once across the
  18 * ice driver.
  19 */
  20#define CREATE_TRACE_POINTS
  21#include "ice_trace.h"
 
 
 
 
  22
  23#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
  24static const char ice_driver_string[] = DRV_SUMMARY;
  25static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
  26
  27/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
  28#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
  29#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
  30
  31MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  32MODULE_DESCRIPTION(DRV_SUMMARY);
 
  33MODULE_LICENSE("GPL v2");
  34MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
  35
  36static int debug = -1;
  37module_param(debug, int, 0644);
  38#ifndef CONFIG_DYNAMIC_DEBUG
  39MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
  40#else
  41MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
  42#endif /* !CONFIG_DYNAMIC_DEBUG */
  43
  44static DEFINE_IDA(ice_aux_ida);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  45
  46static struct workqueue_struct *ice_wq;
 
  47static const struct net_device_ops ice_netdev_safe_mode_ops;
  48static const struct net_device_ops ice_netdev_ops;
  49static int ice_vsi_open(struct ice_vsi *vsi);
  50
  51static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
  52
  53static void ice_vsi_release_all(struct ice_pf *pf);
  54
  55bool netif_is_ice(struct net_device *dev)
 
 
 
 
 
 
 
 
 
  56{
  57	return dev && (dev->netdev_ops == &ice_netdev_ops);
 
  58}
  59
  60/**
  61 * ice_get_tx_pending - returns number of Tx descriptors not processed
  62 * @ring: the ring of descriptors
  63 */
  64static u16 ice_get_tx_pending(struct ice_ring *ring)
  65{
  66	u16 head, tail;
  67
  68	head = ring->next_to_clean;
  69	tail = ring->next_to_use;
  70
  71	if (head != tail)
  72		return (head < tail) ?
  73			tail - head : (tail + ring->count - head);
  74	return 0;
  75}
  76
  77/**
  78 * ice_check_for_hang_subtask - check for and recover hung queues
  79 * @pf: pointer to PF struct
  80 */
  81static void ice_check_for_hang_subtask(struct ice_pf *pf)
  82{
  83	struct ice_vsi *vsi = NULL;
  84	struct ice_hw *hw;
  85	unsigned int i;
  86	int packets;
  87	u32 v;
  88
  89	ice_for_each_vsi(pf, v)
  90		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
  91			vsi = pf->vsi[v];
  92			break;
  93		}
  94
  95	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
  96		return;
  97
  98	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
  99		return;
 100
 101	hw = &vsi->back->hw;
 102
 103	for (i = 0; i < vsi->num_txq; i++) {
 104		struct ice_ring *tx_ring = vsi->tx_rings[i];
 
 
 
 
 
 
 
 
 
 
 105
 106		if (tx_ring && tx_ring->desc) {
 107			/* If packet counter has not changed the queue is
 108			 * likely stalled, so force an interrupt for this
 109			 * queue.
 110			 *
 111			 * prev_pkt would be negative if there was no
 112			 * pending work.
 113			 */
 114			packets = tx_ring->stats.pkts & INT_MAX;
 115			if (tx_ring->tx_stats.prev_pkt == packets) {
 116				/* Trigger sw interrupt to revive the queue */
 117				ice_trigger_sw_intr(hw, tx_ring->q_vector);
 118				continue;
 119			}
 120
 121			/* Memory barrier between read of packet count and call
 122			 * to ice_get_tx_pending()
 123			 */
 124			smp_rmb();
 125			tx_ring->tx_stats.prev_pkt =
 126			    ice_get_tx_pending(tx_ring) ? packets : -1;
 127		}
 128	}
 129}
 130
 131/**
 132 * ice_init_mac_fltr - Set initial MAC filters
 133 * @pf: board private structure
 134 *
 135 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 136 * address and broadcast address. If an error is encountered, netdevice will be
 137 * unregistered.
 138 */
 139static int ice_init_mac_fltr(struct ice_pf *pf)
 140{
 141	enum ice_status status;
 142	struct ice_vsi *vsi;
 143	u8 *perm_addr;
 144
 145	vsi = ice_get_main_vsi(pf);
 146	if (!vsi)
 147		return -EINVAL;
 148
 149	perm_addr = vsi->port_info->mac.perm_addr;
 150	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
 151	if (status)
 152		return -EIO;
 153
 154	return 0;
 155}
 156
 157/**
 158 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 159 * @netdev: the net device on which the sync is happening
 160 * @addr: MAC address to sync
 161 *
 162 * This is a callback function which is called by the in kernel device sync
 163 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 164 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 165 * MAC filters from the hardware.
 166 */
 167static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
 168{
 169	struct ice_netdev_priv *np = netdev_priv(netdev);
 170	struct ice_vsi *vsi = np->vsi;
 171
 172	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
 173				     ICE_FWD_TO_VSI))
 174		return -EINVAL;
 175
 176	return 0;
 177}
 178
 179/**
 180 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 181 * @netdev: the net device on which the unsync is happening
 182 * @addr: MAC address to unsync
 183 *
 184 * This is a callback function which is called by the in kernel device unsync
 185 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 186 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 187 * delete the MAC filters from the hardware.
 188 */
 189static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 190{
 191	struct ice_netdev_priv *np = netdev_priv(netdev);
 192	struct ice_vsi *vsi = np->vsi;
 193
 194	/* Under some circumstances, we might receive a request to delete our
 195	 * own device address from our uc list. Because we store the device
 196	 * address in the VSI's MAC filter list, we need to ignore such
 197	 * requests and not delete our device address from this list.
 198	 */
 199	if (ether_addr_equal(addr, netdev->dev_addr))
 200		return 0;
 201
 202	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
 203				     ICE_FWD_TO_VSI))
 204		return -EINVAL;
 205
 206	return 0;
 207}
 208
 209/**
 210 * ice_vsi_fltr_changed - check if filter state changed
 211 * @vsi: VSI to be checked
 212 *
 213 * returns true if filter state has changed, false otherwise.
 214 */
 215static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 216{
 217	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
 218	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
 219	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 220}
 221
 222/**
 223 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 224 * @vsi: the VSI being configured
 225 * @promisc_m: mask of promiscuous config bits
 226 * @set_promisc: enable or disable promisc flag request
 227 *
 228 */
 229static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
 230{
 231	struct ice_hw *hw = &vsi->back->hw;
 232	enum ice_status status = 0;
 233
 234	if (vsi->type != ICE_VSI_PF)
 235		return 0;
 236
 237	if (vsi->num_vlan > 1) {
 238		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
 239						  set_promisc);
 
 240	} else {
 241		if (set_promisc)
 242			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
 243						     0);
 244		else
 245			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 246						       0);
 247	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 248
 249	if (status)
 250		return -EIO;
 
 
 
 
 
 
 
 
 
 251
 252	return 0;
 
 
 253}
 254
 255/**
 256 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 257 * @vsi: ptr to the VSI
 258 *
 259 * Push any outstanding VSI filter changes through the AdminQ.
 260 */
 261static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 262{
 
 263	struct device *dev = ice_pf_to_dev(vsi->back);
 264	struct net_device *netdev = vsi->netdev;
 265	bool promisc_forced_on = false;
 266	struct ice_pf *pf = vsi->back;
 267	struct ice_hw *hw = &pf->hw;
 268	enum ice_status status = 0;
 269	u32 changed_flags = 0;
 270	u8 promisc_m;
 271	int err = 0;
 272
 273	if (!vsi->netdev)
 274		return -EINVAL;
 275
 276	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
 277		usleep_range(1000, 2000);
 278
 279	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
 280	vsi->current_netdev_flags = vsi->netdev->flags;
 281
 282	INIT_LIST_HEAD(&vsi->tmp_sync_list);
 283	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
 284
 285	if (ice_vsi_fltr_changed(vsi)) {
 286		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 287		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 288		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 289
 290		/* grab the netdev's addr_list_lock */
 291		netif_addr_lock_bh(netdev);
 292		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 293			      ice_add_mac_to_unsync_list);
 294		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 295			      ice_add_mac_to_unsync_list);
 296		/* our temp lists are populated. release lock */
 297		netif_addr_unlock_bh(netdev);
 298	}
 299
 300	/* Remove MAC addresses in the unsync list */
 301	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
 302	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
 303	if (status) {
 304		netdev_err(netdev, "Failed to delete MAC filters\n");
 305		/* if we failed because of alloc failures, just bail */
 306		if (status == ICE_ERR_NO_MEMORY) {
 307			err = -ENOMEM;
 308			goto out;
 309		}
 310	}
 311
 312	/* Add MAC addresses in the sync list */
 313	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
 314	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
 315	/* If filter is added successfully or already exists, do not go into
 316	 * 'if' condition and report it as error. Instead continue processing
 317	 * rest of the function.
 318	 */
 319	if (status && status != ICE_ERR_ALREADY_EXISTS) {
 320		netdev_err(netdev, "Failed to add MAC filters\n");
 321		/* If there is no more space for new umac filters, VSI
 322		 * should go into promiscuous mode. There should be some
 323		 * space reserved for promiscuous filters.
 324		 */
 325		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
 326		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
 327				      vsi->state)) {
 328			promisc_forced_on = true;
 329			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
 330				    vsi->vsi_num);
 331		} else {
 332			err = -EIO;
 333			goto out;
 334		}
 335	}
 
 336	/* check for changes in promiscuous modes */
 337	if (changed_flags & IFF_ALLMULTI) {
 338		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
 339			if (vsi->num_vlan > 1)
 340				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
 341			else
 342				promisc_m = ICE_MCAST_PROMISC_BITS;
 343
 344			err = ice_cfg_promisc(vsi, promisc_m, true);
 345			if (err) {
 346				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
 347					   vsi->vsi_num);
 348				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
 349				goto out_promisc;
 350			}
 351		} else {
 352			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
 353			if (vsi->num_vlan > 1)
 354				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
 355			else
 356				promisc_m = ICE_MCAST_PROMISC_BITS;
 357
 358			err = ice_cfg_promisc(vsi, promisc_m, false);
 359			if (err) {
 360				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
 361					   vsi->vsi_num);
 362				vsi->current_netdev_flags |= IFF_ALLMULTI;
 363				goto out_promisc;
 364			}
 365		}
 366	}
 367
 368	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
 369	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
 370		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 371		if (vsi->current_netdev_flags & IFF_PROMISC) {
 372			/* Apply Rx filter rule to get traffic from wire */
 373			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
 374				err = ice_set_dflt_vsi(pf->first_sw, vsi);
 375				if (err && err != -EEXIST) {
 376					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
 377						   err, vsi->vsi_num);
 378					vsi->current_netdev_flags &=
 379						~IFF_PROMISC;
 380					goto out_promisc;
 381				}
 382				ice_cfg_vlan_pruning(vsi, false, false);
 
 
 
 
 
 
 
 
 
 
 
 383			}
 384		} else {
 385			/* Clear Rx filter to remove traffic from wire */
 386			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
 387				err = ice_clear_dflt_vsi(pf->first_sw);
 388				if (err) {
 389					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
 390						   err, vsi->vsi_num);
 391					vsi->current_netdev_flags |=
 392						IFF_PROMISC;
 393					goto out_promisc;
 394				}
 395				if (vsi->num_vlan > 1)
 396					ice_cfg_vlan_pruning(vsi, true, false);
 
 
 
 
 
 
 
 
 
 
 
 
 
 397			}
 398		}
 399	}
 400	goto exit;
 401
 402out_promisc:
 403	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
 404	goto exit;
 405out:
 406	/* if something went wrong then set the changed flag so we try again */
 407	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
 408	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 409exit:
 410	clear_bit(ICE_CFG_BUSY, vsi->state);
 411	return err;
 412}
 413
 414/**
 415 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 416 * @pf: board private structure
 417 */
 418static void ice_sync_fltr_subtask(struct ice_pf *pf)
 419{
 420	int v;
 421
 422	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
 423		return;
 424
 425	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 426
 427	ice_for_each_vsi(pf, v)
 428		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
 429		    ice_vsi_sync_fltr(pf->vsi[v])) {
 430			/* come back and try again later */
 431			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 432			break;
 433		}
 434}
 435
 436/**
 437 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 438 * @pf: the PF
 439 * @locked: is the rtnl_lock already held
 440 */
 441static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 442{
 443	int node;
 444	int v;
 445
 446	ice_for_each_vsi(pf, v)
 447		if (pf->vsi[v])
 448			ice_dis_vsi(pf->vsi[v], locked);
 449
 450	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
 451		pf->pf_agg_node[node].num_vsis = 0;
 452
 453	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
 454		pf->vf_agg_node[node].num_vsis = 0;
 455}
 456
 457/**
 458 * ice_prepare_for_reset - prep for the core to reset
 459 * @pf: board private structure
 
 460 *
 461 * Inform or close all dependent features in prep for reset.
 462 */
 463static void
 464ice_prepare_for_reset(struct ice_pf *pf)
 465{
 466	struct ice_hw *hw = &pf->hw;
 467	unsigned int i;
 
 
 
 
 468
 469	/* already prepared for reset */
 470	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
 471		return;
 472
 
 
 473	ice_unplug_aux_dev(pf);
 474
 475	/* Notify VFs of impending reset */
 476	if (ice_check_sq_alive(hw, &hw->mailboxq))
 477		ice_vc_notify_reset(pf);
 478
 479	/* Disable VFs until reset is completed */
 480	ice_for_each_vf(pf, i)
 481		ice_set_vf_state_qs_dis(&pf->vf[i]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 482
 483	/* clear SW filtering DB */
 484	ice_clear_hw_tbls(hw);
 485	/* disable the VSIs and their queues that are not already DOWN */
 
 486	ice_pf_dis_all_vsi(pf, false);
 487
 488	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
 489		ice_ptp_release(pf);
 
 
 
 490
 491	if (hw->port_info)
 492		ice_sched_clear_port(hw->port_info);
 493
 494	ice_shutdown_all_ctrlq(hw);
 495
 496	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
 497}
 498
 499/**
 500 * ice_do_reset - Initiate one of many types of resets
 501 * @pf: board private structure
 502 * @reset_type: reset type requested
 503 * before this function was called.
 504 */
 505static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 506{
 507	struct device *dev = ice_pf_to_dev(pf);
 508	struct ice_hw *hw = &pf->hw;
 509
 510	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
 511
 512	ice_prepare_for_reset(pf);
 
 
 
 
 
 513
 514	/* trigger the reset */
 515	if (ice_reset(hw, reset_type)) {
 516		dev_err(dev, "reset %d failed\n", reset_type);
 517		set_bit(ICE_RESET_FAILED, pf->state);
 518		clear_bit(ICE_RESET_OICR_RECV, pf->state);
 519		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 520		clear_bit(ICE_PFR_REQ, pf->state);
 521		clear_bit(ICE_CORER_REQ, pf->state);
 522		clear_bit(ICE_GLOBR_REQ, pf->state);
 523		wake_up(&pf->reset_wait_queue);
 524		return;
 525	}
 526
 527	/* PFR is a bit of a special case because it doesn't result in an OICR
 528	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
 529	 * associated state bits.
 530	 */
 531	if (reset_type == ICE_RESET_PFR) {
 532		pf->pfr_count++;
 533		ice_rebuild(pf, reset_type);
 534		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 535		clear_bit(ICE_PFR_REQ, pf->state);
 536		wake_up(&pf->reset_wait_queue);
 537		ice_reset_all_vfs(pf, true);
 538	}
 539}
 540
 541/**
 542 * ice_reset_subtask - Set up for resetting the device and driver
 543 * @pf: board private structure
 544 */
 545static void ice_reset_subtask(struct ice_pf *pf)
 546{
 547	enum ice_reset_req reset_type = ICE_RESET_INVAL;
 548
 549	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
 550	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
 551	 * of reset is pending and sets bits in pf->state indicating the reset
 552	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
 553	 * prepare for pending reset if not already (for PF software-initiated
 554	 * global resets the software should already be prepared for it as
 555	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
 556	 * by firmware or software on other PFs, that bit is not set so prepare
 557	 * for the reset now), poll for reset done, rebuild and return.
 558	 */
 559	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
 560		/* Perform the largest reset requested */
 561		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
 562			reset_type = ICE_RESET_CORER;
 563		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
 564			reset_type = ICE_RESET_GLOBR;
 565		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
 566			reset_type = ICE_RESET_EMPR;
 567		/* return if no valid reset type requested */
 568		if (reset_type == ICE_RESET_INVAL)
 569			return;
 570		ice_prepare_for_reset(pf);
 571
 572		/* make sure we are ready to rebuild */
 573		if (ice_check_reset(&pf->hw)) {
 574			set_bit(ICE_RESET_FAILED, pf->state);
 575		} else {
 576			/* done with reset. start rebuild */
 577			pf->hw.reset_ongoing = false;
 578			ice_rebuild(pf, reset_type);
 579			/* clear bit to resume normal operations, but
 580			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
 581			 */
 582			clear_bit(ICE_RESET_OICR_RECV, pf->state);
 583			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
 584			clear_bit(ICE_PFR_REQ, pf->state);
 585			clear_bit(ICE_CORER_REQ, pf->state);
 586			clear_bit(ICE_GLOBR_REQ, pf->state);
 587			wake_up(&pf->reset_wait_queue);
 588			ice_reset_all_vfs(pf, true);
 589		}
 590
 591		return;
 592	}
 593
 594	/* No pending resets to finish processing. Check for new resets */
 595	if (test_bit(ICE_PFR_REQ, pf->state))
 596		reset_type = ICE_RESET_PFR;
 
 
 
 
 
 597	if (test_bit(ICE_CORER_REQ, pf->state))
 598		reset_type = ICE_RESET_CORER;
 599	if (test_bit(ICE_GLOBR_REQ, pf->state))
 600		reset_type = ICE_RESET_GLOBR;
 601	/* If no valid reset type requested just return */
 602	if (reset_type == ICE_RESET_INVAL)
 603		return;
 604
 605	/* reset if not already down or busy */
 606	if (!test_bit(ICE_DOWN, pf->state) &&
 607	    !test_bit(ICE_CFG_BUSY, pf->state)) {
 608		ice_do_reset(pf, reset_type);
 609	}
 610}
 611
 612/**
 613 * ice_print_topo_conflict - print topology conflict message
 614 * @vsi: the VSI whose topology status is being checked
 615 */
 616static void ice_print_topo_conflict(struct ice_vsi *vsi)
 617{
 618	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
 619	case ICE_AQ_LINK_TOPO_CONFLICT:
 620	case ICE_AQ_LINK_MEDIA_CONFLICT:
 621	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
 622	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
 623	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
 624		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
 625		break;
 626	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
 627		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
 
 
 
 628		break;
 629	default:
 630		break;
 631	}
 632}
 633
 634/**
 635 * ice_print_link_msg - print link up or down message
 636 * @vsi: the VSI whose link status is being queried
 637 * @isup: boolean for if the link is now up or down
 638 */
 639void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 640{
 641	struct ice_aqc_get_phy_caps_data *caps;
 642	const char *an_advertised;
 643	enum ice_status status;
 644	const char *fec_req;
 645	const char *speed;
 646	const char *fec;
 647	const char *fc;
 648	const char *an;
 
 649
 650	if (!vsi)
 651		return;
 652
 653	if (vsi->current_isup == isup)
 654		return;
 655
 656	vsi->current_isup = isup;
 657
 658	if (!isup) {
 659		netdev_info(vsi->netdev, "NIC Link is Down\n");
 660		return;
 661	}
 662
 663	switch (vsi->port_info->phy.link_info.link_speed) {
 
 
 
 664	case ICE_AQ_LINK_SPEED_100GB:
 665		speed = "100 G";
 666		break;
 667	case ICE_AQ_LINK_SPEED_50GB:
 668		speed = "50 G";
 669		break;
 670	case ICE_AQ_LINK_SPEED_40GB:
 671		speed = "40 G";
 672		break;
 673	case ICE_AQ_LINK_SPEED_25GB:
 674		speed = "25 G";
 675		break;
 676	case ICE_AQ_LINK_SPEED_20GB:
 677		speed = "20 G";
 678		break;
 679	case ICE_AQ_LINK_SPEED_10GB:
 680		speed = "10 G";
 681		break;
 682	case ICE_AQ_LINK_SPEED_5GB:
 683		speed = "5 G";
 684		break;
 685	case ICE_AQ_LINK_SPEED_2500MB:
 686		speed = "2.5 G";
 687		break;
 688	case ICE_AQ_LINK_SPEED_1000MB:
 689		speed = "1 G";
 690		break;
 691	case ICE_AQ_LINK_SPEED_100MB:
 692		speed = "100 M";
 693		break;
 694	default:
 695		speed = "Unknown ";
 696		break;
 697	}
 698
 699	switch (vsi->port_info->fc.current_mode) {
 700	case ICE_FC_FULL:
 701		fc = "Rx/Tx";
 702		break;
 703	case ICE_FC_TX_PAUSE:
 704		fc = "Tx";
 705		break;
 706	case ICE_FC_RX_PAUSE:
 707		fc = "Rx";
 708		break;
 709	case ICE_FC_NONE:
 710		fc = "None";
 711		break;
 712	default:
 713		fc = "Unknown";
 714		break;
 715	}
 716
 717	/* Get FEC mode based on negotiated link info */
 718	switch (vsi->port_info->phy.link_info.fec_info) {
 719	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
 720	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
 721		fec = "RS-FEC";
 722		break;
 723	case ICE_AQ_LINK_25G_KR_FEC_EN:
 724		fec = "FC-FEC/BASE-R";
 725		break;
 726	default:
 727		fec = "NONE";
 728		break;
 729	}
 730
 731	/* check if autoneg completed, might be false due to not supported */
 732	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
 733		an = "True";
 734	else
 735		an = "False";
 736
 737	/* Get FEC mode requested based on PHY caps last SW configuration */
 738	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 739	if (!caps) {
 740		fec_req = "Unknown";
 741		an_advertised = "Unknown";
 742		goto done;
 743	}
 744
 745	status = ice_aq_get_phy_caps(vsi->port_info, false,
 746				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
 747	if (status)
 748		netdev_info(vsi->netdev, "Get phy capability failed.\n");
 749
 750	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
 751
 752	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
 753	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
 754		fec_req = "RS-FEC";
 755	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
 756		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
 757		fec_req = "FC-FEC/BASE-R";
 758	else
 759		fec_req = "NONE";
 760
 761	kfree(caps);
 762
 763done:
 764	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
 765		    speed, fec_req, fec, an_advertised, an, fc);
 766	ice_print_topo_conflict(vsi);
 767}
 768
 769/**
 770 * ice_vsi_link_event - update the VSI's netdev
 771 * @vsi: the VSI on which the link event occurred
 772 * @link_up: whether or not the VSI needs to be set up or down
 773 */
 774static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
 775{
 776	if (!vsi)
 777		return;
 778
 779	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
 780		return;
 781
 782	if (vsi->type == ICE_VSI_PF) {
 783		if (link_up == netif_carrier_ok(vsi->netdev))
 784			return;
 785
 786		if (link_up) {
 787			netif_carrier_on(vsi->netdev);
 788			netif_tx_wake_all_queues(vsi->netdev);
 789		} else {
 790			netif_carrier_off(vsi->netdev);
 791			netif_tx_stop_all_queues(vsi->netdev);
 792		}
 793	}
 794}
 795
 796/**
 797 * ice_set_dflt_mib - send a default config MIB to the FW
 798 * @pf: private PF struct
 799 *
 800 * This function sends a default configuration MIB to the FW.
 801 *
 802 * If this function errors out at any point, the driver is still able to
 803 * function.  The main impact is that LFC may not operate as expected.
 804 * Therefore an error state in this function should be treated with a DBG
 805 * message and continue on with driver rebuild/reenable.
 806 */
 807static void ice_set_dflt_mib(struct ice_pf *pf)
 808{
 809	struct device *dev = ice_pf_to_dev(pf);
 810	u8 mib_type, *buf, *lldpmib = NULL;
 811	u16 len, typelen, offset = 0;
 812	struct ice_lldp_org_tlv *tlv;
 813	struct ice_hw *hw = &pf->hw;
 814	u32 ouisubtype;
 815
 816	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
 817	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
 818	if (!lldpmib) {
 819		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
 820			__func__);
 821		return;
 822	}
 823
 824	/* Add ETS CFG TLV */
 825	tlv = (struct ice_lldp_org_tlv *)lldpmib;
 826	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
 827		   ICE_IEEE_ETS_TLV_LEN);
 828	tlv->typelen = htons(typelen);
 829	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 830		      ICE_IEEE_SUBTYPE_ETS_CFG);
 831	tlv->ouisubtype = htonl(ouisubtype);
 832
 833	buf = tlv->tlvinfo;
 834	buf[0] = 0;
 835
 836	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
 837	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
 838	 * Octets 13 - 20 are TSA values - leave as zeros
 839	 */
 840	buf[5] = 0x64;
 841	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
 842	offset += len + 2;
 843	tlv = (struct ice_lldp_org_tlv *)
 844		((char *)tlv + sizeof(tlv->typelen) + len);
 845
 846	/* Add ETS REC TLV */
 847	buf = tlv->tlvinfo;
 848	tlv->typelen = htons(typelen);
 849
 850	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 851		      ICE_IEEE_SUBTYPE_ETS_REC);
 852	tlv->ouisubtype = htonl(ouisubtype);
 853
 854	/* First octet of buf is reserved
 855	 * Octets 1 - 4 map UP to TC - all UPs map to zero
 856	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
 857	 * Octets 13 - 20 are TSA value - leave as zeros
 858	 */
 859	buf[5] = 0x64;
 860	offset += len + 2;
 861	tlv = (struct ice_lldp_org_tlv *)
 862		((char *)tlv + sizeof(tlv->typelen) + len);
 863
 864	/* Add PFC CFG TLV */
 865	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
 866		   ICE_IEEE_PFC_TLV_LEN);
 867	tlv->typelen = htons(typelen);
 868
 869	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
 870		      ICE_IEEE_SUBTYPE_PFC_CFG);
 871	tlv->ouisubtype = htonl(ouisubtype);
 872
 873	/* Octet 1 left as all zeros - PFC disabled */
 874	buf[0] = 0x08;
 875	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
 876	offset += len + 2;
 877
 878	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
 879		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
 880
 881	kfree(lldpmib);
 882}
 883
 884/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 885 * ice_check_module_power
 886 * @pf: pointer to PF struct
 887 * @link_cfg_err: bitmap from the link info structure
 888 *
 889 * check module power level returned by a previous call to aq_get_link_info
 890 * and print error messages if module power level is not supported
 891 */
 892static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
 893{
 894	/* if module power level is supported, clear the flag */
 895	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
 896			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
 897		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
 898		return;
 899	}
 900
 901	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
 902	 * above block didn't clear this bit, there's nothing to do
 903	 */
 904	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
 905		return;
 906
 907	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
 908		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
 909		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
 910	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
 911		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
 912		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
 913	}
 914}
 915
 916/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 917 * ice_link_event - process the link event
 918 * @pf: PF that the link event is associated with
 919 * @pi: port_info for the port that the link event is associated with
 920 * @link_up: true if the physical link is up and false if it is down
 921 * @link_speed: current link speed received from the link event
 922 *
 923 * Returns 0 on success and negative on failure
 924 */
 925static int
 926ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
 927	       u16 link_speed)
 928{
 929	struct device *dev = ice_pf_to_dev(pf);
 930	struct ice_phy_info *phy_info;
 931	enum ice_status status;
 932	struct ice_vsi *vsi;
 933	u16 old_link_speed;
 934	bool old_link;
 
 935
 936	phy_info = &pi->phy;
 937	phy_info->link_info_old = phy_info->link_info;
 938
 939	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
 940	old_link_speed = phy_info->link_info_old.link_speed;
 941
 942	/* update the link info structures and re-enable link events,
 943	 * don't bail on failure due to other book keeping needed
 944	 */
 945	status = ice_update_link_info(pi);
 946	if (status)
 947		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
 948			pi->lport, ice_stat_str(status),
 949			ice_aq_str(pi->hw->adminq.sq_last_status));
 950
 951	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
 952
 953	/* Check if the link state is up after updating link info, and treat
 954	 * this event as an UP event since the link is actually UP now.
 955	 */
 956	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
 957		link_up = true;
 958
 959	vsi = ice_get_main_vsi(pf);
 960	if (!vsi || !vsi->port_info)
 961		return -EINVAL;
 962
 963	/* turn off PHY if media was removed */
 964	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
 965	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
 966		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
 967		ice_set_link(vsi, false);
 968	}
 969
 970	/* if the old link up/down and speed is the same as the new */
 971	if (link_up == old_link && link_speed == old_link_speed)
 972		return 0;
 973
 
 
 974	if (ice_is_dcb_active(pf)) {
 975		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
 976			ice_dcb_rebuild(pf);
 977	} else {
 978		if (link_up)
 979			ice_set_dflt_mib(pf);
 980	}
 981	ice_vsi_link_event(vsi, link_up);
 982	ice_print_link_msg(vsi, link_up);
 983
 984	ice_vc_notify_link_state(pf);
 985
 986	return 0;
 987}
 988
 989/**
 990 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 991 * @pf: board private structure
 992 */
 993static void ice_watchdog_subtask(struct ice_pf *pf)
 994{
 995	int i;
 996
 997	/* if interface is down do nothing */
 998	if (test_bit(ICE_DOWN, pf->state) ||
 999	    test_bit(ICE_CFG_BUSY, pf->state))
1000		return;
1001
1002	/* make sure we don't do these things too often */
1003	if (time_before(jiffies,
1004			pf->serv_tmr_prev + pf->serv_tmr_period))
1005		return;
1006
1007	pf->serv_tmr_prev = jiffies;
1008
1009	/* Update the stats for active netdevs so the network stack
1010	 * can look at updated numbers whenever it cares to
1011	 */
1012	ice_update_pf_stats(pf);
1013	ice_for_each_vsi(pf, i)
1014		if (pf->vsi[i] && pf->vsi[i]->netdev)
1015			ice_update_vsi_stats(pf->vsi[i]);
1016}
1017
1018/**
1019 * ice_init_link_events - enable/initialize link events
1020 * @pi: pointer to the port_info instance
1021 *
1022 * Returns -EIO on failure, 0 on success
1023 */
1024static int ice_init_link_events(struct ice_port_info *pi)
1025{
1026	u16 mask;
1027
1028	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1029		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
 
1030
1031	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1032		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1033			pi->lport);
1034		return -EIO;
1035	}
1036
1037	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1038		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1039			pi->lport);
1040		return -EIO;
1041	}
1042
1043	return 0;
1044}
1045
1046/**
1047 * ice_handle_link_event - handle link event via ARQ
1048 * @pf: PF that the link event is associated with
1049 * @event: event structure containing link status info
1050 */
1051static int
1052ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1053{
1054	struct ice_aqc_get_link_status_data *link_data;
1055	struct ice_port_info *port_info;
1056	int status;
1057
1058	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1059	port_info = pf->hw.port_info;
1060	if (!port_info)
1061		return -EINVAL;
1062
1063	status = ice_link_event(pf, port_info,
1064				!!(link_data->link_info & ICE_AQ_LINK_UP),
1065				le16_to_cpu(link_data->link_speed));
1066	if (status)
1067		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1068			status);
1069
1070	return status;
1071}
1072
1073enum ice_aq_task_state {
1074	ICE_AQ_TASK_WAITING = 0,
1075	ICE_AQ_TASK_COMPLETE,
1076	ICE_AQ_TASK_CANCELED,
1077};
 
 
 
 
 
 
 
 
 
 
1078
1079struct ice_aq_task {
1080	struct hlist_node entry;
1081
1082	u16 opcode;
1083	struct ice_rq_event_info *event;
1084	enum ice_aq_task_state state;
1085};
 
 
1086
1087/**
1088 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1089 * @pf: pointer to the PF private structure
 
1090 * @opcode: the opcode to wait for
1091 * @timeout: how long to wait, in jiffies
1092 * @event: storage for the event info
1093 *
1094 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1095 * current thread will be put to sleep until the specified event occurs or
1096 * until the given timeout is reached.
 
 
1097 *
1098 * To obtain only the descriptor contents, pass an event without an allocated
1099 * msg_buf. If the complete data buffer is desired, allocate the
1100 * event->msg_buf with enough space ahead of time.
1101 *
1102 * Returns: zero on success, or a negative error code on failure.
1103 */
1104int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1105			  struct ice_rq_event_info *event)
1106{
1107	struct device *dev = ice_pf_to_dev(pf);
1108	struct ice_aq_task *task;
1109	unsigned long start;
1110	long ret;
1111	int err;
1112
1113	task = kzalloc(sizeof(*task), GFP_KERNEL);
1114	if (!task)
1115		return -ENOMEM;
1116
1117	INIT_HLIST_NODE(&task->entry);
1118	task->opcode = opcode;
1119	task->event = event;
1120	task->state = ICE_AQ_TASK_WAITING;
1121
1122	spin_lock_bh(&pf->aq_wait_lock);
1123	hlist_add_head(&task->entry, &pf->aq_wait_list);
1124	spin_unlock_bh(&pf->aq_wait_lock);
1125
1126	start = jiffies;
1127
1128	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1129					       timeout);
1130	switch (task->state) {
1131	case ICE_AQ_TASK_WAITING:
1132		err = ret < 0 ? ret : -ETIMEDOUT;
1133		break;
1134	case ICE_AQ_TASK_CANCELED:
1135		err = ret < 0 ? ret : -ECANCELED;
1136		break;
1137	case ICE_AQ_TASK_COMPLETE:
1138		err = ret < 0 ? ret : 0;
1139		break;
1140	default:
1141		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1142		err = -EINVAL;
1143		break;
1144	}
1145
1146	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1147		jiffies_to_msecs(jiffies - start),
1148		jiffies_to_msecs(timeout),
1149		opcode);
1150
1151	spin_lock_bh(&pf->aq_wait_lock);
1152	hlist_del(&task->entry);
1153	spin_unlock_bh(&pf->aq_wait_lock);
1154	kfree(task);
1155
1156	return err;
1157}
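/* A minimal usage sketch for the helper above (illustrative only; the
 * opcode below is a hypothetical placeholder for whatever AdminQ opcode
 * the caller expects firmware to post). Leave event.msg_buf NULL to
 * capture only the descriptor, or allocate it beforehand to capture the
 * full response buffer:
 *
 *	struct ice_rq_event_info event = {};
 *	int err;
 *
 *	err = ice_aq_wait_for_event(pf, hypothetical_opcode, HZ, &event);
 *	if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "no completion: %d\n", err);
 */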
1158
1159/**
1160 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1161 * @pf: pointer to the PF private structure
1162 * @opcode: the opcode of the event
1163 * @event: the event to check
1164 *
1165 * Loops over the current list of pending threads waiting for an AdminQ event.
1166 * For each matching task, copy the contents of the event into the task
1167 * structure and wake up the thread.
1168 *
1169 * If multiple threads wait for the same opcode, they will all be woken up.
1170 *
1171 * Note that event->msg_buf will only be duplicated if the event has a buffer
1172 * with enough space already allocated. Otherwise, only the descriptor and
1173 * message length will be copied.
1174 *
1175 * Returns: nothing; matching tasks are marked complete and their waiters woken
1176 */
1177static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1178				struct ice_rq_event_info *event)
1179{
1180	struct ice_aq_task *task;
1181	bool found = false;
1182
1183	spin_lock_bh(&pf->aq_wait_lock);
1184	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1185		if (task->state || task->opcode != opcode)
1186			continue;
1187
1188		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1189		task->event->msg_len = event->msg_len;
1190
1191		/* Only copy the data buffer if a destination was set */
1192		if (task->event->msg_buf &&
1193		    task->event->buf_len > event->buf_len) {
1194			memcpy(task->event->msg_buf, event->msg_buf,
1195			       event->buf_len);
1196			task->event->buf_len = event->buf_len;
1197		}
1198
1199		task->state = ICE_AQ_TASK_COMPLETE;
1200		found = true;
1201	}
1202	spin_unlock_bh(&pf->aq_wait_lock);
1203
1204	if (found)
1205		wake_up(&pf->aq_wait_queue);
1206}
1207
1208/**
1209 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1210 * @pf: the PF private structure
1211 *
1212 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1213 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1214 */
1215static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1216{
1217	struct ice_aq_task *task;
1218
1219	spin_lock_bh(&pf->aq_wait_lock);
1220	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1221		task->state = ICE_AQ_TASK_CANCELED;
1222	spin_unlock_bh(&pf->aq_wait_lock);
1223
1224	wake_up(&pf->aq_wait_queue);
1225}
1226
1227/**
1228 * __ice_clean_ctrlq - helper function to clean controlq rings
1229 * @pf: ptr to struct ice_pf
1230 * @q_type: specific Control queue type
1231 */
1232static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1233{
1234	struct device *dev = ice_pf_to_dev(pf);
1235	struct ice_rq_event_info event;
1236	struct ice_hw *hw = &pf->hw;
1237	struct ice_ctl_q_info *cq;
1238	u16 pending, i = 0;
1239	const char *qtype;
1240	u32 oldval, val;
1241
1242	/* Do not clean control queue if/when PF reset fails */
1243	if (test_bit(ICE_RESET_FAILED, pf->state))
1244		return 0;
1245
1246	switch (q_type) {
1247	case ICE_CTL_Q_ADMIN:
1248		cq = &hw->adminq;
1249		qtype = "Admin";
1250		break;
1251	case ICE_CTL_Q_SB:
1252		cq = &hw->sbq;
1253		qtype = "Sideband";
1254		break;
1255	case ICE_CTL_Q_MAILBOX:
1256		cq = &hw->mailboxq;
1257		qtype = "Mailbox";
1258		/* we are going to try to detect a malicious VF, so set the
1259		 * state to begin detection
1260		 */
1261		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1262		break;
1263	default:
1264		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1265		return 0;
1266	}
1267
1268	/* check for error indications - PF_xx_AxQLEN register layout for
1269	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1270	 */
1271	val = rd32(hw, cq->rq.len);
1272	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1273		   PF_FW_ARQLEN_ARQCRIT_M)) {
1274		oldval = val;
1275		if (val & PF_FW_ARQLEN_ARQVFE_M)
1276			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1277				qtype);
1278		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1279			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1280				qtype);
1281		}
1282		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1283			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1284				qtype);
1285		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1286			 PF_FW_ARQLEN_ARQCRIT_M);
1287		if (oldval != val)
1288			wr32(hw, cq->rq.len, val);
1289	}
1290
1291	val = rd32(hw, cq->sq.len);
1292	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1293		   PF_FW_ATQLEN_ATQCRIT_M)) {
1294		oldval = val;
1295		if (val & PF_FW_ATQLEN_ATQVFE_M)
1296			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1297				qtype);
1298		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1299			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1300				qtype);
1301		}
1302		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1303			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1304				qtype);
1305		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1306			 PF_FW_ATQLEN_ATQCRIT_M);
1307		if (oldval != val)
1308			wr32(hw, cq->sq.len, val);
1309	}
1310
1311	event.buf_len = cq->rq_buf_size;
1312	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1313	if (!event.msg_buf)
1314		return 0;
1315
1316	do {
1317		enum ice_status ret;
1318		u16 opcode;
1319
1320		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1321		if (ret == ICE_ERR_AQ_NO_WORK)
1322			break;
1323		if (ret) {
1324			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1325				ice_stat_str(ret));
1326			break;
1327		}
1328
1329		opcode = le16_to_cpu(event.desc.opcode);
1330
1331		/* Notify any thread that might be waiting for this event */
1332		ice_aq_check_events(pf, opcode, &event);
1333
1334		switch (opcode) {
1335		case ice_aqc_opc_get_link_status:
1336			if (ice_handle_link_event(pf, &event))
1337				dev_err(dev, "Could not handle link event\n");
1338			break;
1339		case ice_aqc_opc_event_lan_overflow:
1340			ice_vf_lan_overflow_event(pf, &event);
1341			break;
1342		case ice_mbx_opc_send_msg_to_pf:
1343			if (!ice_is_malicious_vf(pf, &event, i, pending))
1344				ice_vc_process_vf_msg(pf, &event);
1345			break;
1346		case ice_aqc_opc_fw_logging:
1347			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1348			break;
1349		case ice_aqc_opc_lldp_set_mib_change:
1350			ice_dcb_process_lldp_set_mib_change(pf, &event);
1351			break;
1352		default:
1353			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1354				qtype, opcode);
1355			break;
1356		}
1357	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1358
1359	kfree(event.msg_buf);
1360
1361	return pending && (i == ICE_DFLT_IRQ_WORK);
1362}
1363
1364/**
1365 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1366 * @hw: pointer to hardware info
1367 * @cq: control queue information
1368 *
1369 * returns true if there are pending messages in a queue, false if there aren't
1370 */
1371static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1372{
1373	u16 ntu;
1374
1375	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1376	return cq->rq.next_to_clean != ntu;
1377}
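/* For example, if the masked head register (ntu) reads 12 while the
 * driver's next_to_clean (ntc) is still 10, two descriptors remain to be
 * processed and the helper above returns true; once ntc catches up to
 * ntu, the queue is drained and it returns false.
 */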
1378
1379/**
1380 * ice_clean_adminq_subtask - clean the AdminQ rings
1381 * @pf: board private structure
1382 */
1383static void ice_clean_adminq_subtask(struct ice_pf *pf)
1384{
1385	struct ice_hw *hw = &pf->hw;
1386
1387	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1388		return;
1389
1390	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1391		return;
1392
1393	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1394
1395	/* There might be a situation where new messages arrive to a control
1396	 * queue between processing the last message and clearing the
1397	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1398	 * ice_ctrlq_pending) and process new messages if any.
1399	 */
1400	if (ice_ctrlq_pending(hw, &hw->adminq))
1401		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1402
1403	ice_flush(hw);
1404}
1405
1406/**
1407 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1408 * @pf: board private structure
1409 */
1410static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1411{
1412	struct ice_hw *hw = &pf->hw;
1413
1414	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1415		return;
1416
1417	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1418		return;
1419
1420	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1421
1422	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1423		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1424
1425	ice_flush(hw);
1426}
1427
1428/**
1429 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1430 * @pf: board private structure
1431 */
1432static void ice_clean_sbq_subtask(struct ice_pf *pf)
1433{
1434	struct ice_hw *hw = &pf->hw;
1435
1436	/* Nothing to do here if sideband queue is not supported */
1437	if (!ice_is_sbq_supported(hw)) {
1438		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1439		return;
1440	}
1441
1442	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1443		return;
1444
1445	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1446		return;
1447
1448	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1449
1450	if (ice_ctrlq_pending(hw, &hw->sbq))
1451		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1452
1453	ice_flush(hw);
1454}
1455
1456/**
1457 * ice_service_task_schedule - schedule the service task to wake up
1458 * @pf: board private structure
1459 *
1460 * If not already scheduled, this puts the task into the work queue.
1461 */
1462void ice_service_task_schedule(struct ice_pf *pf)
1463{
1464	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1465	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1466	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1467		queue_work(ice_wq, &pf->serv_task);
1468}
1469
1470/**
1471 * ice_service_task_complete - finish up the service task
1472 * @pf: board private structure
1473 */
1474static void ice_service_task_complete(struct ice_pf *pf)
1475{
1476	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1477
1478	/* force memory (pf->state) to sync before next service task */
1479	smp_mb__before_atomic();
1480	clear_bit(ICE_SERVICE_SCHED, pf->state);
1481}
1482
1483/**
1484 * ice_service_task_stop - stop service task and cancel works
1485 * @pf: board private structure
1486 *
1487 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1488 * 1 otherwise.
1489 */
1490static int ice_service_task_stop(struct ice_pf *pf)
1491{
1492	int ret;
1493
1494	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1495
1496	if (pf->serv_tmr.function)
1497		del_timer_sync(&pf->serv_tmr);
1498	if (pf->serv_task.func)
1499		cancel_work_sync(&pf->serv_task);
1500
1501	clear_bit(ICE_SERVICE_SCHED, pf->state);
1502	return ret;
1503}
1504
1505/**
1506 * ice_service_task_restart - restart service task and schedule works
1507 * @pf: board private structure
1508 *
1509 * This function is needed for the suspend and resume flows (e.g. the WoL scenario)
1510 */
1511static void ice_service_task_restart(struct ice_pf *pf)
1512{
1513	clear_bit(ICE_SERVICE_DIS, pf->state);
1514	ice_service_task_schedule(pf);
1515}
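/* A minimal sketch of how the stop/restart pair above is typically used
 * around a power transition; the surrounding steps are assumptions, not
 * a verbatim copy of the driver's PM handlers:
 *
 *	// suspend: quiesce the service task before stopping the device
 *	ice_service_task_stop(pf);
 *	// ... stop queues, save device state ...
 *
 *	// resume: clear ICE_SERVICE_DIS and kick the task again
 *	ice_service_task_restart(pf);
 */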
1516
1517/**
1518 * ice_service_timer - timer callback to schedule service task
1519 * @t: pointer to timer_list
1520 */
1521static void ice_service_timer(struct timer_list *t)
1522{
1523	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1524
1525	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1526	ice_service_task_schedule(pf);
1527}
1528
1529/**
1530 * ice_handle_mdd_event - handle malicious driver detect event
1531 * @pf: pointer to the PF structure
1532 *
1533 * Called from service task. OICR interrupt handler indicates MDD event.
1534 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1535 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1536 * disable the queue, the PF can be configured to reset the VF using ethtool
1537 * private flag mdd-auto-reset-vf.
1538 */
1539static void ice_handle_mdd_event(struct ice_pf *pf)
1540{
1541	struct device *dev = ice_pf_to_dev(pf);
1542	struct ice_hw *hw = &pf->hw;
1543	unsigned int i;
1544	u32 reg;
1545
1546	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1547		/* Since the VF MDD event logging is rate limited, check if
1548		 * there are pending MDD events.
1549		 */
1550		ice_print_vfs_mdd_events(pf);
1551		return;
1552	}
1553
1554	/* find what triggered an MDD event */
1555	reg = rd32(hw, GL_MDET_TX_PQM);
1556	if (reg & GL_MDET_TX_PQM_VALID_M) {
1557		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1558				GL_MDET_TX_PQM_PF_NUM_S;
1559		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1560				GL_MDET_TX_PQM_VF_NUM_S;
1561		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1562				GL_MDET_TX_PQM_MAL_TYPE_S;
1563		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1564				GL_MDET_TX_PQM_QNUM_S);
1565
1566		if (netif_msg_tx_err(pf))
1567			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1568				 event, queue, pf_num, vf_num);
1569		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1570	}
1571
1572	reg = rd32(hw, GL_MDET_TX_TCLAN);
1573	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1574		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1575				GL_MDET_TX_TCLAN_PF_NUM_S;
1576		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1577				GL_MDET_TX_TCLAN_VF_NUM_S;
1578		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1579				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1580		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1581				GL_MDET_TX_TCLAN_QNUM_S);
1582
1583		if (netif_msg_tx_err(pf))
1584			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1585				 event, queue, pf_num, vf_num);
1586		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1587	}
1588
1589	reg = rd32(hw, GL_MDET_RX);
1590	if (reg & GL_MDET_RX_VALID_M) {
1591		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1592				GL_MDET_RX_PF_NUM_S;
1593		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1594				GL_MDET_RX_VF_NUM_S;
1595		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1596				GL_MDET_RX_MAL_TYPE_S;
1597		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1598				GL_MDET_RX_QNUM_S);
1599
1600		if (netif_msg_rx_err(pf))
1601			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1602				 event, queue, pf_num, vf_num);
1603		wr32(hw, GL_MDET_RX, 0xffffffff);
1604	}
1605
1606	/* check to see if this PF caused an MDD event */
1607	reg = rd32(hw, PF_MDET_TX_PQM);
1608	if (reg & PF_MDET_TX_PQM_VALID_M) {
1609		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1610		if (netif_msg_tx_err(pf))
1611			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1612	}
1613
1614	reg = rd32(hw, PF_MDET_TX_TCLAN);
1615	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1616		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1617		if (netif_msg_tx_err(pf))
1618			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1619	}
1620
1621	reg = rd32(hw, PF_MDET_RX);
1622	if (reg & PF_MDET_RX_VALID_M) {
1623		wr32(hw, PF_MDET_RX, 0xFFFF);
1624		if (netif_msg_rx_err(pf))
1625			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1626	}
1627
1628	/* Check to see if one of the VFs caused an MDD event, and then
1629	 * increment counters and set print pending
1630	 */
1631	ice_for_each_vf(pf, i) {
1632		struct ice_vf *vf = &pf->vf[i];
1633
1634		reg = rd32(hw, VP_MDET_TX_PQM(i));
1635		if (reg & VP_MDET_TX_PQM_VALID_M) {
1636			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1637			vf->mdd_tx_events.count++;
1638			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1639			if (netif_msg_tx_err(pf))
1640				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1641					 i);
1642		}
1643
1644		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1645		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1646			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1647			vf->mdd_tx_events.count++;
1648			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1649			if (netif_msg_tx_err(pf))
1650				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1651					 i);
1652		}
1653
1654		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1655		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1656			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1657			vf->mdd_tx_events.count++;
1658			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1659			if (netif_msg_tx_err(pf))
1660				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1661					 i);
1662		}
1663
1664		reg = rd32(hw, VP_MDET_RX(i));
1665		if (reg & VP_MDET_RX_VALID_M) {
1666			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1667			vf->mdd_rx_events.count++;
1668			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1669			if (netif_msg_rx_err(pf))
1670				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1671					 i);
1672
1673			/* Since the queue is disabled on VF Rx MDD events, the
1674			 * PF can be configured to reset the VF through ethtool
1675			 * private flag mdd-auto-reset-vf.
1676			 */
1677			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1678				/* VF MDD event counters will be cleared by
1679				 * reset, so print the event prior to reset.
1680				 */
1681				ice_print_vf_rx_mdd_event(vf);
1682				ice_reset_vf(&pf->vf[i], false);
1683			}
1684		}
1685	}
1686
1687	ice_print_vfs_mdd_events(pf);
1688}
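/* The mdd-auto-reset-vf behavior referenced above is exposed as an
 * ethtool private flag; assuming a PF netdev named eth0, it could be
 * enabled from userspace with:
 *
 *	ethtool --set-priv-flags eth0 mdd-auto-reset-vf on
 */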
1689
1690/**
1691 * ice_force_phys_link_state - Force the physical link state
1692 * @vsi: VSI to force the physical link state to up/down
1693 * @link_up: true/false indicates to set the physical link to up/down
1694 *
1695 * Force the physical link state by getting the current PHY capabilities from
1696 * hardware and setting the PHY config based on the determined capabilities. If
1697 * the link changes, a link event will be triggered because both the Enable Automatic
1698 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1699 *
1700 * Returns 0 on success, negative on failure
1701 */
1702static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1703{
1704	struct ice_aqc_get_phy_caps_data *pcaps;
1705	struct ice_aqc_set_phy_cfg_data *cfg;
1706	struct ice_port_info *pi;
1707	struct device *dev;
1708	int retcode;
1709
1710	if (!vsi || !vsi->port_info || !vsi->back)
1711		return -EINVAL;
1712	if (vsi->type != ICE_VSI_PF)
1713		return 0;
1714
1715	dev = ice_pf_to_dev(vsi->back);
1716
1717	pi = vsi->port_info;
1718
1719	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1720	if (!pcaps)
1721		return -ENOMEM;
1722
1723	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1724				      NULL);
1725	if (retcode) {
1726		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1727			vsi->vsi_num, retcode);
1728		retcode = -EIO;
1729		goto out;
1730	}
1731
1732	/* No change in link */
1733	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1734	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1735		goto out;
1736
1737	/* Use the current user PHY configuration. The current user PHY
1738	 * configuration is initialized during probe from PHY capabilities
1739	 * software mode, and updated on set PHY configuration.
1740	 */
1741	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1742	if (!cfg) {
1743		retcode = -ENOMEM;
1744		goto out;
1745	}
1746
1747	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1748	if (link_up)
1749		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1750	else
1751		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1752
1753	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1754	if (retcode) {
1755		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1756			vsi->vsi_num, retcode);
1757		retcode = -EIO;
1758	}
1759
1760	kfree(cfg);
1761out:
1762	kfree(pcaps);
1763	return retcode;
1764}
1765
1766/**
1767 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1768 * @pi: port info structure
1769 *
1770 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1771 */
1772static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1773{
1774	struct ice_aqc_get_phy_caps_data *pcaps;
1775	struct ice_pf *pf = pi->hw->back;
1776	enum ice_status status;
1777	int err = 0;
1778
1779	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1780	if (!pcaps)
1781		return -ENOMEM;
1782
1783	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1784				     NULL);
1785
1786	if (status) {
1787		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1788		err = -EIO;
1789		goto out;
1790	}
1791
1792	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1793	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1794
1795out:
1796	kfree(pcaps);
1797	return err;
1798}
1799
1800/**
1801 * ice_init_link_dflt_override - Initialize link default override
1802 * @pi: port info structure
1803 *
1804 * Initialize link default override and PHY total port shutdown during probe
1805 */
1806static void ice_init_link_dflt_override(struct ice_port_info *pi)
1807{
1808	struct ice_link_default_override_tlv *ldo;
1809	struct ice_pf *pf = pi->hw->back;
1810
1811	ldo = &pf->link_dflt_override;
1812	if (ice_get_link_default_override(ldo, pi))
1813		return;
1814
1815	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1816		return;
1817
1818	/* Enable Total Port Shutdown (override/replace link-down-on-close
1819	 * ethtool private flag) for ports with Port Disable bit set.
1820	 */
1821	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1822	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1823}
1824
1825/**
1826 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1827 * @pi: port info structure
1828 *
1829 * If default override is enabled, initialize the user PHY cfg speed and FEC
1830 * settings using the default override mask from the NVM.
1831 *
1832 * The PHY should only be configured with the default override settings the
1833 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1834 * is used to indicate that the user PHY cfg default override is initialized
1835 * and the PHY has not been configured with the default override settings. The
1836 * state is set here, and cleared in ice_configure_phy the first time the PHY is
1837 * configured.
1838 *
1839 * This function should be called only if the FW doesn't support default
1840 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1841 */
1842static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1843{
1844	struct ice_link_default_override_tlv *ldo;
1845	struct ice_aqc_set_phy_cfg_data *cfg;
1846	struct ice_phy_info *phy = &pi->phy;
1847	struct ice_pf *pf = pi->hw->back;
1848
1849	ldo = &pf->link_dflt_override;
1850
1851	/* If link default override is enabled, use it to mask NVM PHY capabilities
1852	 * for speed and FEC default configuration.
1853	 */
1854	cfg = &phy->curr_user_phy_cfg;
1855
1856	if (ldo->phy_type_low || ldo->phy_type_high) {
1857		cfg->phy_type_low = pf->nvm_phy_type_lo &
1858				    cpu_to_le64(ldo->phy_type_low);
1859		cfg->phy_type_high = pf->nvm_phy_type_hi &
1860				     cpu_to_le64(ldo->phy_type_high);
1861	}
1862	cfg->link_fec_opt = ldo->fec_options;
1863	phy->curr_user_fec_req = ICE_FEC_AUTO;
1864
1865	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1866}
1867
1868/**
1869 * ice_init_phy_user_cfg - Initialize the PHY user configuration
1870 * @pi: port info structure
1871 *
1872 * Initialize the current user PHY configuration, speed, FEC, and FC requested
1873 * mode to default. The PHY defaults come from the get PHY capabilities
1874 * topology with media, so call this when media first becomes available. An
1875 * error is returned if called when media is not available. The PHY
1876 * initialization completed state is set here.
1877 *
1878 * These configurations are used when setting the PHY configuration. The user
1879 * PHY configuration is updated whenever a new PHY configuration is applied.
1880 * Returns 0 on success, negative on failure
1881 */
1882static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1883{
1884	struct ice_aqc_get_phy_caps_data *pcaps;
1885	struct ice_phy_info *phy = &pi->phy;
1886	struct ice_pf *pf = pi->hw->back;
1887	enum ice_status status;
1888	int err = 0;
1889
1890	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1891		return -EIO;
1892
1893	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1894	if (!pcaps)
1895		return -ENOMEM;
1896
1897	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1898		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1899					     pcaps, NULL);
1900	else
1901		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1902					     pcaps, NULL);
1903	if (status) {
1904		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1905		err = -EIO;
1906		goto err_out;
1907	}
1908
1909	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1910
1911	/* check if lenient mode is supported and enabled */
1912	if (ice_fw_supports_link_override(pi->hw) &&
1913	    !(pcaps->module_compliance_enforcement &
1914	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1915		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1916
1917		/* if the FW supports default PHY configuration mode, then the driver
1918		 * does not have to apply link override settings. If not,
1919		 * initialize user PHY configuration with link override values
1920		 */
1921		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1922		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1923			ice_init_phy_cfg_dflt_override(pi);
1924			goto out;
1925		}
1926	}
1927
1928	/* if link default override is not enabled, set user flow control and
1929	 * FEC settings based on what get_phy_caps returned
1930	 */
1931	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1932						      pcaps->link_fec_options);
1933	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1934
1935out:
1936	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1937	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1938err_out:
1939	kfree(pcaps);
1940	return err;
1941}
1942
1943/**
1944 * ice_configure_phy - configure PHY
1945 * @vsi: VSI of PHY
1946 *
1947 * Set the PHY configuration. If the current PHY configuration is the same as
1948 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1949 * configure based on the get PHY capabilities for topology with media.
1950 */
1951static int ice_configure_phy(struct ice_vsi *vsi)
1952{
1953	struct device *dev = ice_pf_to_dev(vsi->back);
1954	struct ice_port_info *pi = vsi->port_info;
1955	struct ice_aqc_get_phy_caps_data *pcaps;
1956	struct ice_aqc_set_phy_cfg_data *cfg;
1957	struct ice_phy_info *phy = &pi->phy;
1958	struct ice_pf *pf = vsi->back;
1959	enum ice_status status;
1960	int err = 0;
1961
1962	/* Ensure we have media as we cannot configure a medialess port */
1963	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1964		return -EPERM;
1965
1966	ice_print_topo_conflict(vsi);
1967
1968	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1969		return -EPERM;
1970
1971	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1972		return ice_force_phys_link_state(vsi, true);
1973
1974	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1975	if (!pcaps)
1976		return -ENOMEM;
1977
1978	/* Get current PHY config */
1979	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1980				     NULL);
1981	if (status) {
1982		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1983			vsi->vsi_num, ice_stat_str(status));
1984		err = -EIO;
1985		goto done;
1986	}
1987
1988	/* If PHY enable link is configured and configuration has not changed,
1989	 * there's nothing to do
1990	 */
1991	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1992	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1993		goto done;
1994
1995	/* Use PHY topology as baseline for configuration */
1996	memset(pcaps, 0, sizeof(*pcaps));
1997	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1998		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1999					     pcaps, NULL);
2000	else
2001		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2002					     pcaps, NULL);
2003	if (status) {
2004		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
2005			vsi->vsi_num, ice_stat_str(status));
2006		err = -EIO;
2007		goto done;
2008	}
2009
2010	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2011	if (!cfg) {
2012		err = -ENOMEM;
2013		goto done;
2014	}
2015
2016	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2017
2018	/* Speed - If default override pending, use curr_user_phy_cfg set in
2019	 * ice_init_phy_cfg_dflt_override().
2020	 */
2021	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2022			       vsi->back->state)) {
2023		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2024		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2025	} else {
2026		u64 phy_low = 0, phy_high = 0;
2027
2028		ice_update_phy_type(&phy_low, &phy_high,
2029				    pi->phy.curr_user_speed_req);
2030		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2031		cfg->phy_type_high = pcaps->phy_type_high &
2032				     cpu_to_le64(phy_high);
2033	}
2034
2035	/* Can't provide what was requested; use PHY capabilities */
2036	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2037		cfg->phy_type_low = pcaps->phy_type_low;
2038		cfg->phy_type_high = pcaps->phy_type_high;
2039	}
2040
2041	/* FEC */
2042	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2043
2044	/* Can't provide what was requested; use PHY capabilities */
2045	if (cfg->link_fec_opt !=
2046	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2047		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2048		cfg->link_fec_opt = pcaps->link_fec_options;
2049	}
2050
2051	/* Flow Control - always supported; no need to check against
2052	 * capabilities
2053	 */
2054	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2055
2056	/* Enable link and link update */
2057	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2058
2059	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2060	if (status) {
2061		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2062			vsi->vsi_num, ice_stat_str(status));
2063		err = -EIO;
2064	}
2065
2066	kfree(cfg);
2067done:
2068	kfree(pcaps);
2069	return err;
2070}
2071
2072/**
2073 * ice_check_media_subtask - Check for media
2074 * @pf: pointer to PF struct
2075 *
2076 * If media is available, then initialize the PHY user configuration if it
2077 * has not been done yet, and configure the PHY if the interface is up.
2078 */
2079static void ice_check_media_subtask(struct ice_pf *pf)
2080{
2081	struct ice_port_info *pi;
2082	struct ice_vsi *vsi;
2083	int err;
2084
2085	/* No need to check for media if it's already present */
2086	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2087		return;
2088
2089	vsi = ice_get_main_vsi(pf);
2090	if (!vsi)
2091		return;
2092
2093	/* Refresh link info and check if media is present */
2094	pi = vsi->port_info;
2095	err = ice_update_link_info(pi);
2096	if (err)
2097		return;
2098
2099	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2100
2101	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2102		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2103			ice_init_phy_user_cfg(pi);
2104
2105		/* PHY settings are reset on media insertion, reconfigure
2106		 * PHY to preserve settings.
2107		 */
2108		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2109		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2110			return;
2111
2112		err = ice_configure_phy(vsi);
2113		if (!err)
2114			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2115
2116		/* A Link Status Event will be generated; the event handler
2117		 * will complete bringing the interface up
2118		 */
2119	}
2120}
2121
2122/**
2123 * ice_service_task - manage and run subtasks
2124 * @work: pointer to work_struct contained by the PF struct
2125 */
2126static void ice_service_task(struct work_struct *work)
2127{
2128	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2129	unsigned long start_time = jiffies;
2130
2131	/* subtasks */
2132
2133	/* process reset requests first */
2134	ice_reset_subtask(pf);
2135
2136	/* bail if a reset/recovery cycle is pending or rebuild failed */
2137	if (ice_is_reset_in_progress(pf->state) ||
2138	    test_bit(ICE_SUSPENDED, pf->state) ||
2139	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2140		ice_service_task_complete(pf);
2141		return;
2142	}
2143
2144	ice_clean_adminq_subtask(pf);
2145	ice_check_media_subtask(pf);
2146	ice_check_for_hang_subtask(pf);
2147	ice_sync_fltr_subtask(pf);
2148	ice_handle_mdd_event(pf);
2149	ice_watchdog_subtask(pf);
2150
2151	if (ice_is_safe_mode(pf)) {
2152		ice_service_task_complete(pf);
2153		return;
2154	}
2155
2156	ice_process_vflr_event(pf);
2157	ice_clean_mailboxq_subtask(pf);
2158	ice_clean_sbq_subtask(pf);
2159	ice_sync_arfs_fltrs(pf);
2160	ice_flush_fdir_ctx(pf);
2161
2162	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2163	ice_service_task_complete(pf);
2164
2165	/* If the tasks have taken longer than one service timer period
2166	 * or there is more work to be done, reset the service timer to
2167	 * schedule the service task now.
2168	 */
2169	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2170	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2171	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2172	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2173	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2174	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2175	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2176		mod_timer(&pf->serv_tmr, jiffies);
2177}
2178
2179/**
2180 * ice_set_ctrlq_len - helper function to set controlq length
2181 * @hw: pointer to the HW instance
2182 */
2183static void ice_set_ctrlq_len(struct ice_hw *hw)
2184{
2185	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2186	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2187	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2188	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2189	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2190	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2191	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2192	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2193	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2194	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2195	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2196	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2197}
2198
2199/**
2200 * ice_schedule_reset - schedule a reset
2201 * @pf: board private structure
2202 * @reset: reset being requested
2203 */
2204int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2205{
2206	struct device *dev = ice_pf_to_dev(pf);
2207
2208	/* bail out if earlier reset has failed */
2209	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2210		dev_dbg(dev, "earlier reset has failed\n");
2211		return -EIO;
2212	}
2213	/* bail if reset/recovery already in progress */
2214	if (ice_is_reset_in_progress(pf->state)) {
2215		dev_dbg(dev, "Reset already in progress\n");
2216		return -EBUSY;
2217	}
2218
2219	ice_unplug_aux_dev(pf);
2220
2221	switch (reset) {
2222	case ICE_RESET_PFR:
2223		set_bit(ICE_PFR_REQ, pf->state);
2224		break;
2225	case ICE_RESET_CORER:
2226		set_bit(ICE_CORER_REQ, pf->state);
2227		break;
2228	case ICE_RESET_GLOBR:
2229		set_bit(ICE_GLOBR_REQ, pf->state);
2230		break;
2231	default:
2232		return -EINVAL;
2233	}
2234
2235	ice_service_task_schedule(pf);
2236	return 0;
2237}
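/* A minimal sketch of requesting a reset from another context (the
 * trigger condition is a hypothetical placeholder); the actual reset and
 * rebuild then run asynchronously from the service task:
 *
 *	if (fatal_condition_detected &&
 *	    ice_schedule_reset(pf, ICE_RESET_PFR))
 *		dev_dbg(ice_pf_to_dev(pf), "reset request rejected\n");
 */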
2238
2239/**
2240 * ice_irq_affinity_notify - Callback for affinity changes
2241 * @notify: context as to what irq was changed
2242 * @mask: the new affinity mask
2243 *
2244 * This is a callback function used by the irq_set_affinity_notifier function
2245 * so that we may register to receive changes to the irq affinity masks.
2246 */
2247static void
2248ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2249			const cpumask_t *mask)
2250{
2251	struct ice_q_vector *q_vector =
2252		container_of(notify, struct ice_q_vector, affinity_notify);
2253
2254	cpumask_copy(&q_vector->affinity_mask, mask);
2255}
2256
2257/**
2258 * ice_irq_affinity_release - Callback for affinity notifier release
2259 * @ref: internal core kernel usage
2260 *
2261 * This is a callback function used by the irq_set_affinity_notifier function
2262 * to inform the current notification subscriber that they will no longer
2263 * receive notifications.
2264 */
2265static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2266
2267/**
2268 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2269 * @vsi: the VSI being configured
2270 */
2271static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2272{
2273	struct ice_hw *hw = &vsi->back->hw;
2274	int i;
2275
2276	ice_for_each_q_vector(vsi, i)
2277		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2278
2279	ice_flush(hw);
2280	return 0;
2281}
2282
2283/**
2284 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2285 * @vsi: the VSI being configured
2286 * @basename: name for the vector
2287 */
2288static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2289{
2290	int q_vectors = vsi->num_q_vectors;
2291	struct ice_pf *pf = vsi->back;
2292	int base = vsi->base_vector;
2293	struct device *dev;
2294	int rx_int_idx = 0;
2295	int tx_int_idx = 0;
2296	int vector, err;
2297	int irq_num;
2298
2299	dev = ice_pf_to_dev(pf);
2300	for (vector = 0; vector < q_vectors; vector++) {
2301		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2302
2303		irq_num = pf->msix_entries[base + vector].vector;
2304
2305		if (q_vector->tx.ring && q_vector->rx.ring) {
2306			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2307				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2308			tx_int_idx++;
2309		} else if (q_vector->rx.ring) {
2310			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2311				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2312		} else if (q_vector->tx.ring) {
2313			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2314				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2315		} else {
2316			/* skip this unused q_vector */
2317			continue;
2318		}
2319		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2320			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2321					       IRQF_SHARED, q_vector->name,
2322					       q_vector);
2323		else
2324			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2325					       0, q_vector->name, q_vector);
2326		if (err) {
2327			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2328				   err);
2329			goto free_q_irqs;
2330		}
2331
2332		/* register for affinity change notifications */
2333		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2334			struct irq_affinity_notify *affinity_notify;
2335
2336			affinity_notify = &q_vector->affinity_notify;
2337			affinity_notify->notify = ice_irq_affinity_notify;
2338			affinity_notify->release = ice_irq_affinity_release;
2339			irq_set_affinity_notifier(irq_num, affinity_notify);
2340		}
2341
2342		/* assign the mask for this irq */
2343		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2344	}
2345
2346	vsi->irqs_ready = true;
2347	return 0;
2348
2349free_q_irqs:
2350	while (vector) {
2351		vector--;
2352		irq_num = pf->msix_entries[base + vector].vector;
2353		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2354			irq_set_affinity_notifier(irq_num, NULL);
2355		irq_set_affinity_hint(irq_num, NULL);
2356		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2357	}
2358	return err;
2359}
2360
2361/**
2362 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2363 * @vsi: VSI to setup Tx rings used by XDP
2364 *
2365 * Return 0 on success and negative value on error
2366 */
2367static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2368{
2369	struct device *dev = ice_pf_to_dev(vsi->back);
2370	int i;
2371
2372	for (i = 0; i < vsi->num_xdp_txq; i++) {
2373		u16 xdp_q_idx = vsi->alloc_txq + i;
2374		struct ice_ring *xdp_ring;
2375
2376		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2377
2378		if (!xdp_ring)
2379			goto free_xdp_rings;
2380
2381		xdp_ring->q_index = xdp_q_idx;
2382		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2383		xdp_ring->ring_active = false;
2384		xdp_ring->vsi = vsi;
2385		xdp_ring->netdev = NULL;
2386		xdp_ring->dev = dev;
2387		xdp_ring->count = vsi->num_tx_desc;
2388		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2389		if (ice_setup_tx_ring(xdp_ring))
2390			goto free_xdp_rings;
2391		ice_set_ring_xdp(xdp_ring);
2392		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2393	}
2394
2395	return 0;
2396
2397free_xdp_rings:
2398	for (; i >= 0; i--)
2399		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2400			ice_free_tx_ring(vsi->xdp_rings[i]);
2401	return -ENOMEM;
2402}
2403
2404/**
2405 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2406 * @vsi: VSI to set the bpf prog on
2407 * @prog: the bpf prog pointer
2408 */
2409static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2410{
2411	struct bpf_prog *old_prog;
2412	int i;
2413
2414	old_prog = xchg(&vsi->xdp_prog, prog);
2415	if (old_prog)
2416		bpf_prog_put(old_prog);
2417
2418	ice_for_each_rxq(vsi, i)
2419		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2420}
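/* The xchg() above atomically publishes the new program and hands back
 * the old pointer, so concurrent readers see either the old or the new
 * program but never a torn value. The same pattern in isolation (the
 * 'obj' container here is hypothetical):
 *
 *	struct bpf_prog *old = xchg(&obj->xdp_prog, new_prog);
 *
 *	if (old)
 *		bpf_prog_put(old);
 */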
2421
2422/**
2423 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2424 * @vsi: VSI to bring up Tx rings used by XDP
2425 * @prog: bpf program that will be assigned to VSI
2426 *
2427 * Return 0 on success and negative value on error
2428 */
2429int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2430{
2431	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2432	int xdp_rings_rem = vsi->num_xdp_txq;
2433	struct ice_pf *pf = vsi->back;
2434	struct ice_qs_cfg xdp_qs_cfg = {
2435		.qs_mutex = &pf->avail_q_mutex,
2436		.pf_map = pf->avail_txqs,
2437		.pf_map_size = pf->max_pf_txqs,
2438		.q_count = vsi->num_xdp_txq,
2439		.scatter_count = ICE_MAX_SCATTER_TXQS,
2440		.vsi_map = vsi->txq_map,
2441		.vsi_map_offset = vsi->alloc_txq,
2442		.mapping_mode = ICE_VSI_MAP_CONTIG
2443	};
2444	enum ice_status status;
2445	struct device *dev;
2446	int i, v_idx;
2447
2448	dev = ice_pf_to_dev(pf);
2449	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2450				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2451	if (!vsi->xdp_rings)
2452		return -ENOMEM;
2453
2454	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2455	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2456		goto err_map_xdp;
2457
2458	if (ice_xdp_alloc_setup_rings(vsi))
2459		goto clear_xdp_rings;
2460
2461	/* follow the logic from ice_vsi_map_rings_to_vectors */
2462	ice_for_each_q_vector(vsi, v_idx) {
2463		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2464		int xdp_rings_per_v, q_id, q_base;
2465
2466		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2467					       vsi->num_q_vectors - v_idx);
2468		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2469
2470		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2471			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2472
2473			xdp_ring->q_vector = q_vector;
2474			xdp_ring->next = q_vector->tx.ring;
2475			q_vector->tx.ring = xdp_ring;
2476		}
2477		xdp_rings_rem -= xdp_rings_per_v;
2478	}
2479
2480	/* omit the scheduler update if in reset path; XDP queues will be
2481	 * taken into account at the end of ice_vsi_rebuild, where
2482	 * ice_cfg_vsi_lan is being called
2483	 */
2484	if (ice_is_reset_in_progress(pf->state))
2485		return 0;
2486
2487	/* tell the Tx scheduler that right now we have
2488	 * additional queues
2489	 */
2490	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2491		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2492
2493	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2494				 max_txqs);
2495	if (status) {
2496		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2497			ice_stat_str(status));
2498		goto clear_xdp_rings;
2499	}
2500	ice_vsi_assign_bpf_prog(vsi, prog);
2501
2502	return 0;
2503clear_xdp_rings:
2504	for (i = 0; i < vsi->num_xdp_txq; i++)
2505		if (vsi->xdp_rings[i]) {
2506			kfree_rcu(vsi->xdp_rings[i], rcu);
2507			vsi->xdp_rings[i] = NULL;
2508		}
2509
2510err_map_xdp:
2511	mutex_lock(&pf->avail_q_mutex);
2512	for (i = 0; i < vsi->num_xdp_txq; i++) {
2513		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2514		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2515	}
2516	mutex_unlock(&pf->avail_q_mutex);
2517
2518	devm_kfree(dev, vsi->xdp_rings);
2519	return -ENOMEM;
2520}
2521
2522/**
2523 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2524 * @vsi: VSI to remove XDP rings
2525 *
2526 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2527 * resources
2528 */
2529int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2530{
2531	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2532	struct ice_pf *pf = vsi->back;
2533	int i, v_idx;
2534
2535	/* q_vectors are freed in the reset path, so there's no point in detaching
2536	 * rings; if the rebuild was triggered by something other than a reset, the
2537	 * bits in pf->state won't be set, so additionally check the first q_vector
2538	 * against NULL
2539	 */
2540	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2541		goto free_qmap;
2542
2543	ice_for_each_q_vector(vsi, v_idx) {
2544		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2545		struct ice_ring *ring;
2546
2547		ice_for_each_ring(ring, q_vector->tx)
2548			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2549				break;
2550
2551		/* restore the value of last node prior to XDP setup */
2552		q_vector->tx.ring = ring;
2553	}
2554
2555free_qmap:
2556	mutex_lock(&pf->avail_q_mutex);
2557	for (i = 0; i < vsi->num_xdp_txq; i++) {
2558		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2559		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2560	}
2561	mutex_unlock(&pf->avail_q_mutex);
2562
2563	for (i = 0; i < vsi->num_xdp_txq; i++)
2564		if (vsi->xdp_rings[i]) {
2565			if (vsi->xdp_rings[i]->desc)
2566				ice_free_tx_ring(vsi->xdp_rings[i]);
2567			kfree_rcu(vsi->xdp_rings[i], rcu);
2568			vsi->xdp_rings[i] = NULL;
2569		}
2570
2571	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2572	vsi->xdp_rings = NULL;
2573
2574	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2575		return 0;
2576
2577	ice_vsi_assign_bpf_prog(vsi, NULL);
2578
2579	/* notify Tx scheduler that we destroyed XDP queues and bring
2580	 * back the old number of child nodes
2581	 */
2582	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2583		max_txqs[i] = vsi->num_txq;
2584
2585	/* change number of XDP Tx queues to 0 */
2586	vsi->num_xdp_txq = 0;
2587
2588	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2589			       max_txqs);
2590}
2591
2592/**
2593 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2594 * @vsi: VSI to schedule napi on
2595 */
2596static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2597{
2598	int i;
2599
2600	ice_for_each_rxq(vsi, i) {
2601		struct ice_ring *rx_ring = vsi->rx_rings[i];
2602
2603		if (rx_ring->xsk_pool)
2604			napi_schedule(&rx_ring->q_vector->napi);
2605	}
2606}
2607
2608/**
2609 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2610 * @vsi: VSI to setup XDP for
2611 * @prog: XDP program
2612 * @extack: netlink extended ack
2613 */
2614static int
2615ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2616		   struct netlink_ext_ack *extack)
2617{
2618	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2619	bool if_running = netif_running(vsi->netdev);
2620	int ret = 0, xdp_ring_err = 0;
2621
2622	if (frame_size > vsi->rx_buf_len) {
2623		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2624		return -EOPNOTSUPP;
2625	}
2626
2627	/* need to stop netdev while setting up the program for Rx rings */
2628	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2629		ret = ice_down(vsi);
2630		if (ret) {
2631			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2632			return ret;
2633		}
2634	}
2635
2636	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2637		vsi->num_xdp_txq = vsi->alloc_rxq;
2638		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2639		if (xdp_ring_err)
2640			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2641	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2642		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2643		if (xdp_ring_err)
2644			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2645	} else {
2646		ice_vsi_assign_bpf_prog(vsi, prog);
2647	}
2648
2649	if (if_running)
2650		ret = ice_up(vsi);
2651
2652	if (!ret && prog)
2653		ice_vsi_rx_napi_schedule(vsi);
2654
2655	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2656}
2657
2658/**
2659 * ice_xdp_safe_mode - XDP handler for safe mode
2660 * @dev: netdevice
2661 * @xdp: XDP command
2662 */
2663static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2664			     struct netdev_bpf *xdp)
2665{
2666	NL_SET_ERR_MSG_MOD(xdp->extack,
2667			   "Please provide working DDP firmware package in order to use XDP\n"
2668			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2669	return -EOPNOTSUPP;
2670}
2671
2672/**
2673 * ice_xdp - implements XDP handler
2674 * @dev: netdevice
2675 * @xdp: XDP command
2676 */
2677static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2678{
2679	struct ice_netdev_priv *np = netdev_priv(dev);
2680	struct ice_vsi *vsi = np->vsi;
2681
2682	if (vsi->type != ICE_VSI_PF) {
2683		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2684		return -EINVAL;
2685	}
2686
2687	switch (xdp->command) {
2688	case XDP_SETUP_PROG:
2689		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2690	case XDP_SETUP_XSK_POOL:
2691		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2692					  xdp->xsk.queue_id);
2693	default:
2694		return -EINVAL;
2695	}
2696}
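/* From userspace, a driver-mode XDP program typically reaches the
 * XDP_SETUP_PROG case above via iproute2 (interface and object file
 * names below are placeholders):
 *
 *	ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 */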
2697
2698/**
2699 * ice_ena_misc_vector - enable the non-queue interrupts
2700 * @pf: board private structure
2701 */
2702static void ice_ena_misc_vector(struct ice_pf *pf)
2703{
2704	struct ice_hw *hw = &pf->hw;
2705	u32 val;
2706
2707	/* Disable anti-spoof detection interrupt to prevent spurious event
2708	 * interrupts during a function reset. Anti-spoof functionality is
2709	 * still supported.
2710	 */
2711	val = rd32(hw, GL_MDCK_TX_TDPU);
2712	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2713	wr32(hw, GL_MDCK_TX_TDPU, val);
2714
2715	/* clear things first */
2716	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2717	rd32(hw, PFINT_OICR);		/* read to clear */
2718
2719	val = (PFINT_OICR_ECC_ERR_M |
2720	       PFINT_OICR_MAL_DETECT_M |
2721	       PFINT_OICR_GRST_M |
2722	       PFINT_OICR_PCI_EXCEPTION_M |
2723	       PFINT_OICR_VFLR_M |
2724	       PFINT_OICR_HMC_ERR_M |
2725	       PFINT_OICR_PE_PUSH_M |
2726	       PFINT_OICR_PE_CRITERR_M);
2727
2728	wr32(hw, PFINT_OICR_ENA, val);
2729
2730	/* SW_ITR_IDX = 0, but don't change INTENA */
2731	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2732	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2733}
2734
2735/**
2736 * ice_misc_intr - misc interrupt handler
2737 * @irq: interrupt number
2738 * @data: pointer to a q_vector
2739 */
2740static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2741{
2742	struct ice_pf *pf = (struct ice_pf *)data;
2743	struct ice_hw *hw = &pf->hw;
2744	irqreturn_t ret = IRQ_NONE;
2745	struct device *dev;
2746	u32 oicr, ena_mask;
2747
2748	dev = ice_pf_to_dev(pf);
2749	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2750	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2751	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2752
2753	oicr = rd32(hw, PFINT_OICR);
2754	ena_mask = rd32(hw, PFINT_OICR_ENA);
2755
2756	if (oicr & PFINT_OICR_SWINT_M) {
2757		ena_mask &= ~PFINT_OICR_SWINT_M;
2758		pf->sw_int_count++;
2759	}
2760
2761	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2762		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2763		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2764	}
2765	if (oicr & PFINT_OICR_VFLR_M) {
2766		/* disable any further VFLR event notifications */
2767		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2768			u32 reg = rd32(hw, PFINT_OICR_ENA);
2769
2770			reg &= ~PFINT_OICR_VFLR_M;
2771			wr32(hw, PFINT_OICR_ENA, reg);
2772		} else {
2773			ena_mask &= ~PFINT_OICR_VFLR_M;
2774			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2775		}
2776	}
2777
2778	if (oicr & PFINT_OICR_GRST_M) {
2779		u32 reset;
2780
2781		/* we have a reset warning */
2782		ena_mask &= ~PFINT_OICR_GRST_M;
2783		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2784			GLGEN_RSTAT_RESET_TYPE_S;
2785
2786		if (reset == ICE_RESET_CORER)
2787			pf->corer_count++;
2788		else if (reset == ICE_RESET_GLOBR)
2789			pf->globr_count++;
2790		else if (reset == ICE_RESET_EMPR)
2791			pf->empr_count++;
2792		else
2793			dev_dbg(dev, "Invalid reset type %d\n", reset);
2794
2795		/* If a reset cycle isn't already in progress, we set a bit in
2796		 * pf->state so that the service task can start a reset/rebuild.
2797		 */
2798		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2799			if (reset == ICE_RESET_CORER)
2800				set_bit(ICE_CORER_RECV, pf->state);
2801			else if (reset == ICE_RESET_GLOBR)
2802				set_bit(ICE_GLOBR_RECV, pf->state);
2803			else
2804				set_bit(ICE_EMPR_RECV, pf->state);
2805
2806			/* There are a couple of different bits at play here.
2807			 * hw->reset_ongoing indicates whether the hardware is
2808			 * in reset. This is set to true when a reset interrupt
2809			 * is received and set back to false after the driver
2810			 * has determined that the hardware is out of reset.
2811			 *
2812			 * ICE_RESET_OICR_RECV in pf->state indicates
2813			 * that a post reset rebuild is required before the
2814			 * driver is operational again. This is set above.
2815			 *
2816			 * As this is the start of the reset/rebuild cycle, set
2817			 * both to indicate that.
2818			 */
2819			hw->reset_ongoing = true;
2820		}
2821	}
2822
2823	if (oicr & PFINT_OICR_TSYN_TX_M) {
2824		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
2825		ice_ptp_process_ts(pf);
2826	}
2827
2828	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
2829		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2830		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
2831
2832		/* Save EVENTs from GLTSYN register */
2833		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
2834						     GLTSYN_STAT_EVENT1_M |
2835						     GLTSYN_STAT_EVENT2_M);
2836		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
2837		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
2838	}
2839
2840#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2841	if (oicr & ICE_AUX_CRIT_ERR) {
2842		struct iidc_event *event;
2843
2844		ena_mask &= ~ICE_AUX_CRIT_ERR;
2845		event = kzalloc(sizeof(*event), GFP_KERNEL);
2846		if (event) {
2847			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2848			/* report the entire OICR value to AUX driver */
2849			event->reg = oicr;
2850			ice_send_event_to_aux(pf, event);
2851			kfree(event);
2852		}
2853	}
2854
2855	/* Report any remaining unexpected interrupts */
2856	oicr &= ena_mask;
2857	if (oicr) {
2858		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2859		/* If a critical error is pending there is no choice but to
2860		 * reset the device.
2861		 */
2862		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2863			    PFINT_OICR_ECC_ERR_M)) {
2864			set_bit(ICE_PFR_REQ, pf->state);
2865			ice_service_task_schedule(pf);
2866		}
2867	}
2868	ret = IRQ_HANDLED;
2869
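	/* kick the service task so any state bits set above are acted on */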
2870	ice_service_task_schedule(pf);
2871	ice_irq_dynamic_ena(hw, NULL, NULL);
2872
2873	return ret;
2874}
2875
2876/**
2877 * ice_dis_ctrlq_interrupts - disable control queue interrupts
2878 * @hw: pointer to HW structure
2879 */
2880static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2881{
2882	/* disable Admin queue Interrupt causes */
2883	wr32(hw, PFINT_FW_CTL,
2884	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2885
2886	/* disable Mailbox queue Interrupt causes */
2887	wr32(hw, PFINT_MBX_CTL,
2888	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2889
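	/* disable Sideband queue Interrupt causes */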
2890	wr32(hw, PFINT_SB_CTL,
2891	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2892
2893	/* disable Control queue Interrupt causes */
2894	wr32(hw, PFINT_OICR_CTL,
2895	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2896
2897	ice_flush(hw);
2898}
2899
2900/**
2901 * ice_free_irq_msix_misc - Unroll misc vector setup
2902 * @pf: board private structure
2903 */
2904static void ice_free_irq_msix_misc(struct ice_pf *pf)
2905{
2906	struct ice_hw *hw = &pf->hw;
2907
2908	ice_dis_ctrlq_interrupts(hw);
2909
2910	/* disable OICR interrupt */
2911	wr32(hw, PFINT_OICR_ENA, 0);
2912	ice_flush(hw);
2913
2914	if (pf->msix_entries) {
2915		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2916		devm_free_irq(ice_pf_to_dev(pf),
2917			      pf->msix_entries[pf->oicr_idx].vector, pf);
2918	}
2919
2920	pf->num_avail_sw_msix += 1;
2921	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2922}
2923
2924/**
2925 * ice_ena_ctrlq_interrupts - enable control queue interrupts
2926 * @hw: pointer to HW structure
2927 * @reg_idx: HW vector index to associate the control queue interrupts with
2928 */
2929static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2930{
2931	u32 val;
2932
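	/* enable Other Interrupt Cause (OICR) and map it to this vector */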
2933	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2934	       PFINT_OICR_CTL_CAUSE_ENA_M);
2935	wr32(hw, PFINT_OICR_CTL, val);
2936
2937	/* enable Admin queue Interrupt causes */
2938	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2939	       PFINT_FW_CTL_CAUSE_ENA_M);
2940	wr32(hw, PFINT_FW_CTL, val);
2941
2942	/* enable Mailbox queue Interrupt causes */
2943	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2944	       PFINT_MBX_CTL_CAUSE_ENA_M);
2945	wr32(hw, PFINT_MBX_CTL, val);
2946
2947	/* enable Sideband queue Interrupt causes */
2948	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
2949	       PFINT_SB_CTL_CAUSE_ENA_M);
2950	wr32(hw, PFINT_SB_CTL, val);
2951
2952	ice_flush(hw);
2953}
2954
2955/**
2956 * ice_req_irq_msix_misc - Set up the misc vector to handle non-queue events
2957 * @pf: board private structure
2958 *
2959 * This sets up the handler for MSIX 0, which is used to manage the
2960 * non-queue interrupts, e.g. AdminQ and errors. This is not used
2961 * when in MSI or Legacy interrupt mode.
2962 */
2963static int ice_req_irq_msix_misc(struct ice_pf *pf)
2964{
2965	struct device *dev = ice_pf_to_dev(pf);
2966	struct ice_hw *hw = &pf->hw;
2967	int oicr_idx, err = 0;
2968
2969	if (!pf->int_name[0])
2970		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2971			 dev_driver_string(dev), dev_name(dev));
2972
2973	/* Do not request IRQ but do enable OICR interrupt since settings are
2974	 * lost during reset. Note that this function is called only during the
2975	 * rebuild path and not while a reset is in progress.
2976	 */
2977	if (ice_is_reset_in_progress(pf->state))
2978		goto skip_req_irq;
2979
2980	/* reserve one vector in irq_tracker for misc interrupts */
2981	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2982	if (oicr_idx < 0)
2983		return oicr_idx;
2984
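	/* the misc vector consumes one entry from the SW MSI-X pool */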
2985	pf->num_avail_sw_msix -= 1;
2986	pf->oicr_idx = (u16)oicr_idx;
2987
2988	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2989			       ice_misc_intr, 0, pf->int_name, pf);
2990	if (err) {
2991		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2992			pf->int_name, err);
2993		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2994		pf->num_avail_sw_msix += 1;
2995		return err;
2996	}
2997
2998skip_req_irq:
2999	ice_ena_misc_vector(pf);
3000
3001	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3002	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3003	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3004
3005	ice_flush(hw);
3006	ice_irq_dynamic_ena(hw, NULL, NULL);
3007
3008	return 0;
3009}
3010
3011/**
3012 * ice_napi_add - register NAPI handler for the VSI
3013 * @vsi: VSI for which NAPI handler is to be registered
3014 *
3015 * This function is only called in the driver's load path. Registering the NAPI
3016 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3017 * reset/rebuild, etc.)
3018 */
3019static void ice_napi_add(struct ice_vsi *vsi)
3020{
3021	int v_idx;
3022
3023	if (!vsi->netdev)
3024		return;
3025
3026	ice_for_each_q_vector(vsi, v_idx)
3027		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3028			       ice_napi_poll, NAPI_POLL_WEIGHT);
3029}
3030
3031/**
3032 * ice_set_ops - set netdev and ethtool ops for the given netdev
3033 * @netdev: netdev instance
3034 */
3035static void ice_set_ops(struct net_device *netdev)
3036{
3037	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3038
3039	if (ice_is_safe_mode(pf)) {
3040		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3041		ice_set_ethtool_safe_mode_ops(netdev);
3042		return;
3043	}
3044
3045	netdev->netdev_ops = &ice_netdev_ops;
3046	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3047	ice_set_ethtool_ops(netdev);
3048}
3049
3050/**
3051 * ice_set_netdev_features - set features for the given netdev
3052 * @netdev: netdev instance
3053 */
3054static void ice_set_netdev_features(struct net_device *netdev)
3055{
3056	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3057	netdev_features_t csumo_features;
3058	netdev_features_t vlano_features;
3059	netdev_features_t dflt_features;
3060	netdev_features_t tso_features;
3061
3062	if (ice_is_safe_mode(pf)) {
3063		/* safe mode */
3064		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3065		netdev->hw_features = netdev->features;
3066		return;
3067	}
3068
3069	dflt_features = NETIF_F_SG	|
3070			NETIF_F_HIGHDMA	|
3071			NETIF_F_NTUPLE	|
3072			NETIF_F_RXHASH;
3073
3074	csumo_features = NETIF_F_RXCSUM	  |
3075			 NETIF_F_IP_CSUM  |
3076			 NETIF_F_SCTP_CRC |
3077			 NETIF_F_IPV6_CSUM;
3078
3079	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3080			 NETIF_F_HW_VLAN_CTAG_TX     |
3081			 NETIF_F_HW_VLAN_CTAG_RX;
3082
3083	tso_features = NETIF_F_TSO			|
3084		       NETIF_F_TSO_ECN			|
3085		       NETIF_F_TSO6			|
3086		       NETIF_F_GSO_GRE			|
3087		       NETIF_F_GSO_UDP_TUNNEL		|
3088		       NETIF_F_GSO_GRE_CSUM		|
3089		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3090		       NETIF_F_GSO_PARTIAL		|
3091		       NETIF_F_GSO_IPXIP4		|
3092		       NETIF_F_GSO_IPXIP6		|
3093		       NETIF_F_GSO_UDP_L4;
3094
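	/* these tunnel checksum offloads can only be offloaded via GSO_PARTIAL */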
3095	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3096					NETIF_F_GSO_GRE_CSUM;
3097	/* set features that user can change */
3098	netdev->hw_features = dflt_features | csumo_features |
3099			      vlano_features | tso_features;
3100
3101	/* add support for HW_CSUM on packets with MPLS header */
3102	netdev->mpls_features =  NETIF_F_HW_CSUM;
3103
3104	/* enable features */
3105	netdev->features |= netdev->hw_features;
3106	/* encap and VLAN devices inherit default, csumo and tso features */
3107	netdev->hw_enc_features |= dflt_features | csumo_features |
3108				   tso_features;
3109	netdev->vlan_features |= dflt_features | csumo_features |
3110				 tso_features;
3111}
3112
3113/**
3114 * ice_cfg_netdev - Allocate, configure and register a netdev
3115 * @vsi: the VSI associated with the new netdev
3116 *
3117 * Returns 0 on success, negative value on failure
3118 */
3119static int ice_cfg_netdev(struct ice_vsi *vsi)
3120{
3121	struct ice_netdev_priv *np;
3122	struct net_device *netdev;
3123	u8 mac_addr[ETH_ALEN];
3124
3125	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3126				    vsi->alloc_rxq);
3127	if (!netdev)
3128		return -ENOMEM;
3129
3130	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3131	vsi->netdev = netdev;
3132	np = netdev_priv(netdev);
3133	np->vsi = vsi;
3134
3135	ice_set_netdev_features(netdev);
3136
3137	ice_set_ops(netdev);
3138
3139	if (vsi->type == ICE_VSI_PF) {
3140		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3141		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3142		ether_addr_copy(netdev->dev_addr, mac_addr);
3143		ether_addr_copy(netdev->perm_addr, mac_addr);
3144	}
3145
3146	netdev->priv_flags |= IFF_UNICAST_FLT;
3147
3148	/* Setup netdev TC information */
3149	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3150
3151	/* set up the watchdog timeout value to be 5 seconds */
3152	netdev->watchdog_timeo = 5 * HZ;
3153
3154	netdev->min_mtu = ETH_MIN_MTU;
3155	netdev->max_mtu = ICE_MAX_MTU;
3156
3157	return 0;
3158}
3159
3160/**
3161 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3162 * @lut: Lookup table
3163 * @rss_table_size: Lookup table size
3164 * @rss_size: number of queues available for hashing
3165 */
3166void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3167{
3168	u16 i;
3169
3170	for (i = 0; i < rss_table_size; i++)
3171		lut[i] = i % rss_size;
3172}
3173
3174/**
3175 * ice_pf_vsi_setup - Set up a PF VSI
3176 * @pf: board private structure
3177 * @pi: pointer to the port_info instance
3178 *
3179 * Returns pointer to the successfully allocated VSI software struct
3180 * on success, otherwise returns NULL on failure.
3181 */
3182static struct ice_vsi *
3183ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3184{
3185	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3186}
3187
3188/**
3189 * ice_ctrl_vsi_setup - Set up a control VSI
3190 * @pf: board private structure
3191 * @pi: pointer to the port_info instance
3192 *
3193 * Returns pointer to the successfully allocated VSI software struct
3194 * on success, otherwise returns NULL on failure.
3195 */
3196static struct ice_vsi *
3197ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3198{
3199	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3200}
3201
3202/**
3203 * ice_lb_vsi_setup - Set up a loopback VSI
3204 * @pf: board private structure
3205 * @pi: pointer to the port_info instance
3206 *
3207 * Returns pointer to the successfully allocated VSI software struct
3208 * on success, otherwise returns NULL on failure.
3209 */
3210struct ice_vsi *
3211ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3212{
3213	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3214}
3215
3216/**
3217 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3218 * @netdev: network interface to be adjusted
3219 * @proto: unused protocol
3220 * @vid: VLAN ID to be added
3221 *
3222 * net_device_ops implementation for adding VLAN IDs
3223 */
3224static int
3225ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3226		    u16 vid)
3227{
3228	struct ice_netdev_priv *np = netdev_priv(netdev);
3229	struct ice_vsi *vsi = np->vsi;
3230	int ret;
3231
3232	/* VLAN 0 is added by default during load/reset */
3233	if (!vid)
3234		return 0;
3235
3236	/* Enable VLAN pruning when a VLAN other than 0 is added */
3237	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3238		ret = ice_cfg_vlan_pruning(vsi, true, false);
3239		if (ret)
3240			return ret;
3241	}
3242
3243	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3244	 * packets aren't pruned by the device's internal switch on Rx
3245	 */
3246	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3247	if (!ret)
3248		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3249
3250	return ret;
3251}
3252
3253/**
3254 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3255 * @netdev: network interface to be adjusted
3256 * @proto: unused protocol
3257 * @vid: VLAN ID to be removed
3258 *
3259 * net_device_ops implementation for removing VLAN IDs
3260 */
3261static int
3262ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3263		     u16 vid)
3264{
3265	struct ice_netdev_priv *np = netdev_priv(netdev);
3266	struct ice_vsi *vsi = np->vsi;
3267	int ret;
3268
3269	/* don't allow removal of VLAN 0 */
3270	if (!vid)
3271		return 0;
3272
3273	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3274	 * information
3275	 */
3276	ret = ice_vsi_kill_vlan(vsi, vid);
3277	if (ret)
3278		return ret;
3279
3280	/* Disable pruning when VLAN 0 is the only VLAN rule */
3281	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3282		ret = ice_cfg_vlan_pruning(vsi, false, false);
3283
3284	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3285	return ret;
3286}
3287
3288/**
3289 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3290 * @pf: board private structure
3291 *
3292 * Returns 0 on success, negative value on failure
3293 */
3294static int ice_setup_pf_sw(struct ice_pf *pf)
3295{
3296	struct ice_vsi *vsi;
3297	int status = 0;
3298
3299	if (ice_is_reset_in_progress(pf->state))
3300		return -EBUSY;
3301
3302	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3303	if (!vsi)
3304		return -ENOMEM;
3305
3306	status = ice_cfg_netdev(vsi);
3307	if (status) {
3308		status = -ENODEV;
3309		goto unroll_vsi_setup;
3310	}
3311	/* netdev has to be configured before setting frame size */
3312	ice_vsi_cfg_frame_size(vsi);
3313
3314	/* Setup DCB netlink interface */
3315	ice_dcbnl_setup(vsi);
3316
3317	/* registering the NAPI handler requires both the queues and
3318	 * netdev to be created, which are done in ice_pf_vsi_setup()
3319	 * and ice_cfg_netdev() respectively
3320	 */
3321	ice_napi_add(vsi);
3322
3323	status = ice_set_cpu_rx_rmap(vsi);
3324	if (status) {
3325		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3326			vsi->vsi_num, status);
3327		status = -EINVAL;
3328		goto unroll_napi_add;
3329	}
3330	status = ice_init_mac_fltr(pf);
3331	if (status)
3332		goto free_cpu_rx_map;
3333
3334	return status;
3335
3336free_cpu_rx_map:
3337	ice_free_cpu_rx_rmap(vsi);
3338
3339unroll_napi_add:
3340	if (vsi) {
3341		ice_napi_del(vsi);
3342		if (vsi->netdev) {
3343			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3344			free_netdev(vsi->netdev);
3345			vsi->netdev = NULL;
3346		}
3347	}
3348
3349unroll_vsi_setup:
3350	ice_vsi_release(vsi);
3351	return status;
3352}
3353
3354/**
3355 * ice_get_avail_q_count - Get count of available (unused) queues
3356 * @pf_qmap: bitmap to count available queues from
3357 * @lock: pointer to a mutex that protects access to pf_qmap
3358 * @size: size of the bitmap
3359 */
3360static u16
3361ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3362{
3363	unsigned long bit;
3364	u16 count = 0;
3365
3366	mutex_lock(lock);
3367	for_each_clear_bit(bit, pf_qmap, size)
3368		count++;
3369	mutex_unlock(lock);
3370
3371	return count;
3372}
3373
3374/**
3375 * ice_get_avail_txq_count - Get count of available Tx queues
3376 * @pf: pointer to an ice_pf instance
3377 */
3378u16 ice_get_avail_txq_count(struct ice_pf *pf)
3379{
3380	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3381				     pf->max_pf_txqs);
3382}
3383
3384/**
3385 * ice_get_avail_rxq_count - Get count of Rx queues in use
3386 * @pf: pointer to an ice_pf instance
3387 */
3388u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3389{
3390	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3391				     pf->max_pf_rxqs);
3392}
3393
3394/**
3395 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3396 * @pf: board private structure to initialize
3397 */
3398static void ice_deinit_pf(struct ice_pf *pf)
3399{
3400	ice_service_task_stop(pf);
3401	mutex_destroy(&pf->sw_mutex);
3402	mutex_destroy(&pf->tc_mutex);
3403	mutex_destroy(&pf->avail_q_mutex);
3404
3405	if (pf->avail_txqs) {
3406		bitmap_free(pf->avail_txqs);
3407		pf->avail_txqs = NULL;
3408	}
3409
3410	if (pf->avail_rxqs) {
3411		bitmap_free(pf->avail_rxqs);
3412		pf->avail_rxqs = NULL;
3413	}
3414
3415	if (pf->ptp.clock)
3416		ptp_clock_unregister(pf->ptp.clock);
3417}
3418
3419/**
3420 * ice_set_pf_caps - set PFs capability flags
3421 * @pf: pointer to the PF instance
3422 */
3423static void ice_set_pf_caps(struct ice_pf *pf)
3424{
3425	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3426
3427	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3428	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3429	if (func_caps->common_cap.rdma) {
3430		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3431		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3432	}
3433	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3434	if (func_caps->common_cap.dcb)
3435		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3436	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3437	if (func_caps->common_cap.sr_iov_1_1) {
3438		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3439		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3440					      ICE_MAX_VF_COUNT);
3441	}
3442	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3443	if (func_caps->common_cap.rss_table_size)
3444		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3445
3446	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3447	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3448		u16 unused;
3449
3450		/* ctrl_vsi_idx will be set to a valid value when flow director
3451		 * is set up by ice_init_fdir
3452		 */
3453		pf->ctrl_vsi_idx = ICE_NO_VSI;
3454		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3455		/* force guaranteed filter pool for PF */
3456		ice_alloc_fd_guar_item(&pf->hw, &unused,
3457				       func_caps->fd_fltr_guar);
3458		/* force shared filter pool for PF */
3459		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3460				       func_caps->fd_fltr_best_effort);
3461	}
3462
3463	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3464	if (func_caps->common_cap.ieee_1588)
3465		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3466
3467	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3468	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3469}
3470
3471/**
3472 * ice_init_pf - Initialize general software structures (struct ice_pf)
3473 * @pf: board private structure to initialize
3474 */
3475static int ice_init_pf(struct ice_pf *pf)
3476{
3477	ice_set_pf_caps(pf);
3478
3479	mutex_init(&pf->sw_mutex);
3480	mutex_init(&pf->tc_mutex);
3481
3482	INIT_HLIST_HEAD(&pf->aq_wait_list);
3483	spin_lock_init(&pf->aq_wait_lock);
3484	init_waitqueue_head(&pf->aq_wait_queue);
3485
3486	init_waitqueue_head(&pf->reset_wait_queue);
3487
3488	/* setup service timer and periodic service task */
3489	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3490	pf->serv_tmr_period = HZ;
3491	INIT_WORK(&pf->serv_task, ice_service_task);
3492	clear_bit(ICE_SERVICE_SCHED, pf->state);
3493
3494	mutex_init(&pf->avail_q_mutex);
3495	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3496	if (!pf->avail_txqs)
3497		return -ENOMEM;
3498
3499	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3500	if (!pf->avail_rxqs) {
3501		bitmap_free(pf->avail_txqs);
3502		pf->avail_txqs = NULL;
3503		return -ENOMEM;
3504	}
3505
3506	return 0;
3507}
3508
3509/**
3510 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3511 * @pf: board private structure
3512 *
3513 * Compute the number of MSI-X vectors required (v_budget) and request them
3514 * from the OS. Returns the number of vectors reserved, or negative on failure.
3515 */
3516static int ice_ena_msix_range(struct ice_pf *pf)
3517{
3518	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3519	struct device *dev = ice_pf_to_dev(pf);
3520	int needed, err, i;
3521
3522	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3523	num_cpus = num_online_cpus();
3524
3525	/* reserve for LAN miscellaneous handler */
3526	needed = ICE_MIN_LAN_OICR_MSIX;
3527	if (v_left < needed)
3528		goto no_hw_vecs_left_err;
3529	v_budget += needed;
3530	v_left -= needed;
3531
3532	/* reserve for flow director */
3533	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3534		needed = ICE_FDIR_MSIX;
3535		if (v_left < needed)
3536			goto no_hw_vecs_left_err;
3537		v_budget += needed;
3538		v_left -= needed;
3539	}
3540
3541	/* total used for non-traffic vectors */
3542	v_other = v_budget;
3543
3544	/* reserve vectors for LAN traffic */
3545	needed = num_cpus;
3546	if (v_left < needed)
3547		goto no_hw_vecs_left_err;
3548	pf->num_lan_msix = needed;
3549	v_budget += needed;
3550	v_left -= needed;
3551
3552	/* reserve vectors for RDMA auxiliary driver */
3553	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3554		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3555		if (v_left < needed)
3556			goto no_hw_vecs_left_err;
3557		pf->num_rdma_msix = needed;
3558		v_budget += needed;
3559		v_left -= needed;
3560	}
3561
3562	pf->msix_entries = devm_kcalloc(dev, v_budget,
3563					sizeof(*pf->msix_entries), GFP_KERNEL);
3564	if (!pf->msix_entries) {
3565		err = -ENOMEM;
3566		goto exit_err;
3567	}
3568
3569	for (i = 0; i < v_budget; i++)
3570		pf->msix_entries[i].entry = i;
3571
3572	/* actually reserve the vectors */
3573	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3574					 ICE_MIN_MSIX, v_budget);
3575	if (v_actual < 0) {
3576		dev_err(dev, "unable to reserve MSI-X vectors\n");
3577		err = v_actual;
3578		goto msix_err;
3579	}
3580
3581	if (v_actual < v_budget) {
3582		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3583			 v_budget, v_actual);
3584
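		/* not all requested vectors were granted; error out below the
		 * minimum, otherwise split what remains between LAN and RDMA
		 */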
3585		if (v_actual < ICE_MIN_MSIX) {
3586			/* error if we can't get minimum vectors */
3587			pci_disable_msix(pf->pdev);
3588			err = -ERANGE;
3589			goto msix_err;
3590		} else {
3591			int v_remain = v_actual - v_other;
3592			int v_rdma = 0, v_min_rdma = 0;
3593
3594			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3595				/* Need at least 1 interrupt in addition to
3596				 * AEQ MSIX
3597				 */
3598				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3599				v_min_rdma = ICE_MIN_RDMA_MSIX;
3600			}
3601
3602			if (v_actual == ICE_MIN_MSIX ||
3603			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3604				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3605				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3606
3607				pf->num_rdma_msix = 0;
3608				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3609			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3610				   (v_remain - v_rdma < v_rdma)) {
3611				/* Support minimum RDMA and give remaining
3612				 * vectors to LAN MSIX
3613				 */
3614				pf->num_rdma_msix = v_min_rdma;
3615				pf->num_lan_msix = v_remain - v_min_rdma;
3616			} else {
3617				/* Split remaining MSIX with RDMA after
3618				 * accounting for AEQ MSIX
3619				 */
3620				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3621						    ICE_RDMA_NUM_AEQ_MSIX;
3622				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3623			}
3624
3625			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3626				   pf->num_lan_msix);
3627
3628			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3629				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3630					   pf->num_rdma_msix);
3631		}
3632	}
3633
3634	return v_actual;
3635
3636msix_err:
3637	devm_kfree(dev, pf->msix_entries);
3638	goto exit_err;
3639
3640no_hw_vecs_left_err:
3641	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3642		needed, v_left);
3643	err = -ERANGE;
3644exit_err:
3645	pf->num_rdma_msix = 0;
3646	pf->num_lan_msix = 0;
3647	return err;
3648}
3649
3650/**
3651 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3652 * @pf: board private structure
3653 */
3654static void ice_dis_msix(struct ice_pf *pf)
3655{
3656	pci_disable_msix(pf->pdev);
3657	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3658	pf->msix_entries = NULL;
3659}
3660
3661/**
3662 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3663 * @pf: board private structure
3664 */
3665static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3666{
3667	ice_dis_msix(pf);
3668
3669	if (pf->irq_tracker) {
3670		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3671		pf->irq_tracker = NULL;
3672	}
3673}
3674
3675/**
3676 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3677 * @pf: board private structure to initialize
3678 */
3679static int ice_init_interrupt_scheme(struct ice_pf *pf)
3680{
3681	int vectors;
3682
3683	vectors = ice_ena_msix_range(pf);
3684
3685	if (vectors < 0)
3686		return vectors;
3687
3688	/* set up vector assignment tracking */
3689	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3690				       struct_size(pf->irq_tracker, list, vectors),
3691				       GFP_KERNEL);
3692	if (!pf->irq_tracker) {
3693		ice_dis_msix(pf);
3694		return -ENOMEM;
3695	}
3696
3697	/* populate the SW interrupt pool with the number of OS-granted IRQs */
3698	pf->num_avail_sw_msix = (u16)vectors;
3699	pf->irq_tracker->num_entries = (u16)vectors;
3700	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3701
3702	return 0;
3703}
3704
3705/**
3706 * ice_is_wol_supported - check if WoL is supported
3707 * @hw: pointer to hardware info
3708 *
3709 * Check if WoL is supported based on the HW configuration.
3710 * Returns true if NVM supports and enables WoL for this port, false otherwise
3711 */
3712bool ice_is_wol_supported(struct ice_hw *hw)
3713{
3714	u16 wol_ctrl;
3715
3716	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3717	 * word) indicates WoL is not supported on the corresponding PF ID.
3718	 */
3719	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3720		return false;
3721
3722	return !(BIT(hw->port_info->lport) & wol_ctrl);
3723}
3724
3725/**
3726 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3727 * @vsi: VSI being changed
3728 * @new_rx: new number of Rx queues
3729 * @new_tx: new number of Tx queues
3730 *
3731 * Only change the number of queues if new_tx or new_rx is non-zero.
3732 *
3733 * Returns 0 on success.
3734 */
3735int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3736{
3737	struct ice_pf *pf = vsi->back;
3738	int err = 0, timeout = 50;
3739
3740	if (!new_rx && !new_tx)
3741		return -EINVAL;
3742
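	/* serialize with other configuration flows; bail out after ~100 ms */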
3743	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3744		timeout--;
3745		if (!timeout)
3746			return -EBUSY;
3747		usleep_range(1000, 2000);
3748	}
3749
3750	if (new_tx)
3751		vsi->req_txq = (u16)new_tx;
3752	if (new_rx)
3753		vsi->req_rxq = (u16)new_rx;
3754
3755	/* set for the next time the netdev is started */
3756	if (!netif_running(vsi->netdev)) {
3757		ice_vsi_rebuild(vsi, false);
3758		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3759		goto done;
3760	}
3761
3762	ice_vsi_close(vsi);
3763	ice_vsi_rebuild(vsi, false);
3764	ice_pf_dcb_recfg(pf);
3765	ice_vsi_open(vsi);
3766done:
3767	clear_bit(ICE_CFG_BUSY, pf->state);
3768	return err;
3769}
3770
3771/**
3772 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3773 * @pf: PF to configure
3774 *
3775 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3776 * VSI can still Tx/Rx VLAN tagged packets.
3777 */
3778static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3779{
3780	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3781	struct ice_vsi_ctx *ctxt;
3782	enum ice_status status;
3783	struct ice_hw *hw;
3784
3785	if (!vsi)
3786		return;
3787
3788	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3789	if (!ctxt)
3790		return;
3791
3792	hw = &pf->hw;
3793	ctxt->info = vsi->info;
3794
3795	ctxt->info.valid_sections =
3796		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3797			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3798			    ICE_AQ_VSI_PROP_SW_VALID);
3799
3800	/* disable VLAN anti-spoof */
3801	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3802				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3803
3804	/* disable VLAN pruning and keep all other settings */
3805	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3806
3807	/* allow all VLANs on Tx and don't strip on Rx */
3808	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3809		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3810
3811	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3812	if (status) {
3813		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3814			ice_stat_str(status),
3815			ice_aq_str(hw->adminq.sq_last_status));
3816	} else {
3817		vsi->info.sec_flags = ctxt->info.sec_flags;
3818		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3819		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3820	}
3821
3822	kfree(ctxt);
3823}
3824
3825/**
3826 * ice_log_pkg_init - log result of DDP package load
3827 * @hw: pointer to hardware info
3828 * @status: status of package load
3829 */
3830static void
3831ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3832{
3833	struct ice_pf *pf = (struct ice_pf *)hw->back;
3834	struct device *dev = ice_pf_to_dev(pf);
3835
3836	switch (*status) {
3837	case ICE_SUCCESS:
3838		/* The package download AdminQ command returned success either because
3839		 * this download succeeded or because a package is already loaded on
3840		 * the device (ICE_ERR_AQ_NO_WORK).
3841		 */
3842		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3843		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3844		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3845		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3846		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3847			    sizeof(hw->pkg_name))) {
3848			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3849				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3850					 hw->active_pkg_name,
3851					 hw->active_pkg_ver.major,
3852					 hw->active_pkg_ver.minor,
3853					 hw->active_pkg_ver.update,
3854					 hw->active_pkg_ver.draft);
3855			else
3856				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3857					 hw->active_pkg_name,
3858					 hw->active_pkg_ver.major,
3859					 hw->active_pkg_ver.minor,
3860					 hw->active_pkg_ver.update,
3861					 hw->active_pkg_ver.draft);
3862		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3863			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3864			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3865				hw->active_pkg_name,
3866				hw->active_pkg_ver.major,
3867				hw->active_pkg_ver.minor,
3868				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3869			*status = ICE_ERR_NOT_SUPPORTED;
3870		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3871			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3872			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3873				 hw->active_pkg_name,
3874				 hw->active_pkg_ver.major,
3875				 hw->active_pkg_ver.minor,
3876				 hw->active_pkg_ver.update,
3877				 hw->active_pkg_ver.draft,
3878				 hw->pkg_name,
3879				 hw->pkg_ver.major,
3880				 hw->pkg_ver.minor,
3881				 hw->pkg_ver.update,
3882				 hw->pkg_ver.draft);
3883		} else {
3884			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3885			*status = ICE_ERR_NOT_SUPPORTED;
3886		}
3887		break;
3888	case ICE_ERR_FW_DDP_MISMATCH:
3889		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3890		break;
3891	case ICE_ERR_BUF_TOO_SHORT:
3892	case ICE_ERR_CFG:
3893		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3894		break;
3895	case ICE_ERR_NOT_SUPPORTED:
3896		/* Package File version not supported */
3897		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3898		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3899		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3900			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3901		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3902			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3903			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3904			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3905				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3906		break;
3907	case ICE_ERR_AQ_ERROR:
3908		switch (hw->pkg_dwnld_status) {
3909		case ICE_AQ_RC_ENOSEC:
3910		case ICE_AQ_RC_EBADSIG:
3911			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3912			return;
3913		case ICE_AQ_RC_ESVN:
3914			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3915			return;
3916		case ICE_AQ_RC_EBADMAN:
3917		case ICE_AQ_RC_EBADBUF:
3918			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3919			/* poll for reset to complete */
3920			if (ice_check_reset(hw))
3921				dev_err(dev, "Error resetting device. Please reload the driver\n");
3922			return;
3923		default:
3924			break;
3925		}
3926		fallthrough;
3927	default:
3928		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3929			*status);
3930		break;
3931	}
3932}
3933
3934/**
3935 * ice_load_pkg - load/reload the DDP Package file
3936 * @firmware: firmware structure when firmware requested or NULL for reload
3937 * @pf: pointer to the PF instance
3938 *
3939 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3940 * initialize HW tables.
3941 */
3942static void
3943ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3944{
3945	enum ice_status status = ICE_ERR_PARAM;
3946	struct device *dev = ice_pf_to_dev(pf);
3947	struct ice_hw *hw = &pf->hw;
3948
3949	/* Load DDP Package */
3950	if (firmware && !hw->pkg_copy) {
3951		status = ice_copy_and_init_pkg(hw, firmware->data,
3952					       firmware->size);
3953		ice_log_pkg_init(hw, &status);
3954	} else if (!firmware && hw->pkg_copy) {
3955		/* Reload package during rebuild after CORER/GLOBR reset */
3956		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3957		ice_log_pkg_init(hw, &status);
3958	} else {
3959		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3960	}
3961
3962	if (status) {
3963		/* Safe Mode */
3964		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3965		return;
3966	}
3967
3968	/* A successful package download is the precondition for advanced
3969	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
3970	 */
3971	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3972}
3973
3974/**
3975 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3976 * @pf: pointer to the PF structure
3977 *
3978 * There is no error returned here because the driver should be able to handle
3979 * 128 Byte cache lines, so we only print a warning in case issues are seen,
3980 * specifically with Tx.
3981 */
3982static void ice_verify_cacheline_size(struct ice_pf *pf)
3983{
3984	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3985		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3986			 ICE_CACHE_LINE_BYTES);
3987}
3988
3989/**
3990 * ice_send_version - update firmware with driver version
3991 * @pf: PF struct
3992 *
3993 * Returns ICE_SUCCESS on success, else error code
3994 */
3995static enum ice_status ice_send_version(struct ice_pf *pf)
3996{
3997	struct ice_driver_ver dv;
3998
3999	dv.major_ver = 0xff;
4000	dv.minor_ver = 0xff;
4001	dv.build_ver = 0xff;
4002	dv.subbuild_ver = 0;
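	/* the numeric version fields are placeholders; the UTS_RELEASE
	 * string below identifies the driver build
	 */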
4003	strscpy((char *)dv.driver_string, UTS_RELEASE,
4004		sizeof(dv.driver_string));
4005	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4006}
4007
4008/**
4009 * ice_init_fdir - Initialize flow director VSI and configuration
4010 * @pf: pointer to the PF instance
4011 *
4012 * returns 0 on success, negative on error
4013 */
4014static int ice_init_fdir(struct ice_pf *pf)
4015{
4016	struct device *dev = ice_pf_to_dev(pf);
4017	struct ice_vsi *ctrl_vsi;
4018	int err;
4019
4020	/* Side Band Flow Director needs to have a control VSI.
4021	 * Allocate it and store it in the PF.
4022	 */
4023	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4024	if (!ctrl_vsi) {
4025		dev_dbg(dev, "could not create control VSI\n");
4026		return -ENOMEM;
4027	}
4028
4029	err = ice_vsi_open_ctrl(ctrl_vsi);
4030	if (err) {
4031		dev_dbg(dev, "could not open control VSI\n");
4032		goto err_vsi_open;
4033	}
4034
4035	mutex_init(&pf->hw.fdir_fltr_lock);
4036
4037	err = ice_fdir_create_dflt_rules(pf);
4038	if (err)
4039		goto err_fdir_rule;
4040
4041	return 0;
4042
4043err_fdir_rule:
4044	ice_fdir_release_flows(&pf->hw);
4045	ice_vsi_close(ctrl_vsi);
4046err_vsi_open:
4047	ice_vsi_release(ctrl_vsi);
4048	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4049		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4050		pf->ctrl_vsi_idx = ICE_NO_VSI;
4051	}
4052	return err;
4053}
4054
4055/**
4056 * ice_get_opt_fw_name - return optional firmware file name or NULL
4057 * @pf: pointer to the PF instance
4058 */
4059static char *ice_get_opt_fw_name(struct ice_pf *pf)
4060{
4061	/* The optional firmware name matches the default, with an additional dash
4062	 * followed by an EUI-64 identifier (PCIe Device Serial Number)
4063	 */
4064	struct pci_dev *pdev = pf->pdev;
4065	char *opt_fw_filename;
4066	u64 dsn;
4067
4068	/* Determine the name of the optional file using the DSN (two
4069	 * dwords following the start of the DSN Capability).
4070	 */
4071	dsn = pci_get_dsn(pdev);
4072	if (!dsn)
4073		return NULL;
4074
4075	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4076	if (!opt_fw_filename)
4077		return NULL;
4078
4079	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4080		 ICE_DDP_PKG_PATH, dsn);
4081
4082	return opt_fw_filename;
4083}
4084
4085/**
4086 * ice_request_fw - request the DDP package file and load it onto the device
4087 * @pf: pointer to the PF instance
4088 */
4089static void ice_request_fw(struct ice_pf *pf)
4090{
4091	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4092	const struct firmware *firmware = NULL;
4093	struct device *dev = ice_pf_to_dev(pf);
4094	int err = 0;
4095
4096	/* an optional device-specific DDP package (if present) overrides the
4097	 * default DDP package file. The kernel logs a debug message if the file
4098	 * doesn't exist, and warning messages for other errors.
4099	 */
4100	if (opt_fw_filename) {
4101		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4102		if (err) {
4103			kfree(opt_fw_filename);
4104			goto dflt_pkg_load;
4105		}
4106
4107		/* the firmware request succeeded; download the package to the device */
4108		ice_load_pkg(firmware, pf);
4109		kfree(opt_fw_filename);
4110		release_firmware(firmware);
4111		return;
4112	}
4113
4114dflt_pkg_load:
4115	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4116	if (err) {
4117		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4118		return;
4119	}
4120
4121	/* the firmware request succeeded; download the package to the device */
4122	ice_load_pkg(firmware, pf);
4123	release_firmware(firmware);
4124}
4125
4126/**
4127 * ice_print_wake_reason - show the wake up cause in the log
4128 * @pf: pointer to the PF struct
4129 */
4130static void ice_print_wake_reason(struct ice_pf *pf)
4131{
4132	u32 wus = pf->wakeup_reason;
4133	const char *wake_str;
4134
4135	/* if no wake event, nothing to print */
4136	if (!wus)
4137		return;
4138
4139	if (wus & PFPM_WUS_LNKC_M)
4140		wake_str = "Link\n";
4141	else if (wus & PFPM_WUS_MAG_M)
4142		wake_str = "Magic Packet\n";
4143	else if (wus & PFPM_WUS_MNG_M)
4144		wake_str = "Management\n";
4145	else if (wus & PFPM_WUS_FW_RST_WK_M)
4146		wake_str = "Firmware Reset\n";
4147	else
4148		wake_str = "Unknown\n";
4149
4150	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4151}
4152
4153/**
4154 * ice_register_netdev - register netdev and devlink port
4155 * @pf: pointer to the PF struct
4156 */
4157static int ice_register_netdev(struct ice_pf *pf)
4158{
4159	struct ice_vsi *vsi;
4160	int err = 0;
4161
4162	vsi = ice_get_main_vsi(pf);
4163	if (!vsi || !vsi->netdev)
4164		return -EIO;
4165
4166	err = register_netdev(vsi->netdev);
4167	if (err)
4168		goto err_register_netdev;
4169
4170	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4171	netif_carrier_off(vsi->netdev);
4172	netif_tx_stop_all_queues(vsi->netdev);
4173	err = ice_devlink_create_port(vsi);
4174	if (err)
4175		goto err_devlink_create;
4176
4177	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
4178
4179	return 0;
4180err_devlink_create:
4181	unregister_netdev(vsi->netdev);
4182	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4183err_register_netdev:
4184	free_netdev(vsi->netdev);
4185	vsi->netdev = NULL;
4186	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4187	return err;
4188}
4189
4190/**
4191 * ice_probe - Device initialization routine
4192 * @pdev: PCI device information struct
4193 * @ent: entry in ice_pci_tbl
4194 *
4195 * Returns 0 on success, negative on failure
4196 */
4197static int
4198ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4199{
4200	struct device *dev = &pdev->dev;
4201	struct ice_pf *pf;
4202	struct ice_hw *hw;
4203	int i, err;
4204
4205	if (pdev->is_virtfn) {
4206		dev_err(dev, "can't probe a virtual function\n");
4207		return -EINVAL;
4208	}
4209
4210	/* this driver uses devres, see
4211	 * Documentation/driver-api/driver-model/devres.rst
4212	 */
4213	err = pcim_enable_device(pdev);
4214	if (err)
4215		return err;
4216
4217	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4218	if (err) {
4219		dev_err(dev, "BAR0 I/O map error %d\n", err);
4220		return err;
4221	}
4222
4223	pf = ice_allocate_pf(dev);
4224	if (!pf)
4225		return -ENOMEM;
4226
4227	/* initialize Auxiliary index to invalid value */
4228	pf->aux_idx = -1;
4229
4230	/* set up for high or low DMA */
4231	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4232	if (err)
4233		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4234	if (err) {
4235		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4236		return err;
4237	}
4238
4239	pci_enable_pcie_error_reporting(pdev);
4240	pci_set_master(pdev);
4241
4242	pf->pdev = pdev;
4243	pci_set_drvdata(pdev, pf);
4244	set_bit(ICE_DOWN, pf->state);
4245	/* Disable service task until DOWN bit is cleared */
4246	set_bit(ICE_SERVICE_DIS, pf->state);
4247
4248	hw = &pf->hw;
4249	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4250	pci_save_state(pdev);
4251
4252	hw->back = pf;
4253	hw->vendor_id = pdev->vendor;
4254	hw->device_id = pdev->device;
4255	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4256	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4257	hw->subsystem_device_id = pdev->subsystem_device;
4258	hw->bus.device = PCI_SLOT(pdev->devfn);
4259	hw->bus.func = PCI_FUNC(pdev->devfn);
4260	ice_set_ctrlq_len(hw);
4261
4262	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4263
4264	err = ice_devlink_register(pf);
4265	if (err) {
4266		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4267		goto err_exit_unroll;
4268	}
4269
4270#ifndef CONFIG_DYNAMIC_DEBUG
4271	if (debug < -1)
4272		hw->debug_mask = debug;
4273#endif
4274
4275	err = ice_init_hw(hw);
4276	if (err) {
4277		dev_err(dev, "ice_init_hw failed: %d\n", err);
4278		err = -EIO;
4279		goto err_exit_unroll;
4280	}
4281
4282	ice_request_fw(pf);
4283
4284	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4285	 * set in pf->flags, which will cause ice_is_safe_mode to return
4286	 * true
4287	 */
4288	if (ice_is_safe_mode(pf)) {
4289		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4290		/* we already got function/device capabilities but these don't
4291		 * reflect what the driver needs to do in safe mode. Instead of
4292		 * adding conditional logic everywhere to ignore these
4293		 * device/function capabilities, override them.
4294		 */
4295		ice_set_safe_mode_caps(hw);
4296	}
4297
4298	err = ice_init_pf(pf);
4299	if (err) {
4300		dev_err(dev, "ice_init_pf failed: %d\n", err);
4301		goto err_init_pf_unroll;
4302	}
4303
4304	ice_devlink_init_regions(pf);
4305
4306	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4307	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4308	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4309	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4310	i = 0;
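	/* add one udp_tunnel_nic table per tunnel type the device supports */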
4311	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4312		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4313			pf->hw.tnl.valid_count[TNL_VXLAN];
4314		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4315			UDP_TUNNEL_TYPE_VXLAN;
4316		i++;
4317	}
4318	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4319		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4320			pf->hw.tnl.valid_count[TNL_GENEVE];
4321		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4322			UDP_TUNNEL_TYPE_GENEVE;
4323		i++;
4324	}
4325
4326	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4327	if (!pf->num_alloc_vsi) {
4328		err = -EIO;
4329		goto err_init_pf_unroll;
4330	}
4331	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4332		dev_warn(&pf->pdev->dev,
4333			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4334			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4335		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4336	}
4337
4338	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4339			       GFP_KERNEL);
4340	if (!pf->vsi) {
4341		err = -ENOMEM;
4342		goto err_init_pf_unroll;
4343	}
4344
4345	err = ice_init_interrupt_scheme(pf);
4346	if (err) {
4347		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4348		err = -EIO;
4349		goto err_init_vsi_unroll;
4350	}
4351
4352	/* In case of MSI-X we are going to set up the misc vector right here
4353	 * to handle admin queue events etc. In case of legacy and MSI
4354	 * the misc functionality and queue processing are combined in
4355	 * the same vector and that gets set up at open.
4356	 */
4357	err = ice_req_irq_msix_misc(pf);
4358	if (err) {
4359		dev_err(dev, "setup of misc vector failed: %d\n", err);
4360		goto err_init_interrupt_unroll;
4361	}
4362
4363	/* create switch struct for the switch element created by FW on boot */
4364	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4365	if (!pf->first_sw) {
4366		err = -ENOMEM;
4367		goto err_msix_misc_unroll;
4368	}
4369
4370	if (hw->evb_veb)
4371		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4372	else
4373		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4374
4375	pf->first_sw->pf = pf;
4376
4377	/* record the sw_id available for later use */
4378	pf->first_sw->sw_id = hw->port_info->sw_id;
4379
4380	err = ice_setup_pf_sw(pf);
4381	if (err) {
4382		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4383		goto err_alloc_sw_unroll;
4384	}
4385
4386	clear_bit(ICE_SERVICE_DIS, pf->state);
4387
4388	/* tell the firmware we are up */
4389	err = ice_send_version(pf);
4390	if (err) {
4391		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4392			UTS_RELEASE, err);
4393		goto err_send_version_unroll;
4394	}
4395
4396	/* since everything is good, start the service timer */
4397	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4398
4399	err = ice_init_link_events(pf->hw.port_info);
4400	if (err) {
4401		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4402		goto err_send_version_unroll;
4403	}
4404
4405	/* not a fatal error if this fails */
4406	err = ice_init_nvm_phy_type(pf->hw.port_info);
4407	if (err)
4408		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4409
4410	/* not a fatal error if this fails */
4411	err = ice_update_link_info(pf->hw.port_info);
4412	if (err)
4413		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4414
4415	ice_init_link_dflt_override(pf->hw.port_info);
4416
4417	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4418
4419	/* if media available, initialize PHY settings */
4420	if (pf->hw.port_info->phy.link_info.link_info &
4421	    ICE_AQ_MEDIA_AVAILABLE) {
4422		/* not a fatal error if this fails */
4423		err = ice_init_phy_user_cfg(pf->hw.port_info);
4424		if (err)
4425			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4426
4427		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4428			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4429
4430			if (vsi)
4431				ice_configure_phy(vsi);
4432		}
4433	} else {
4434		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4435	}
4436
4437	ice_verify_cacheline_size(pf);
4438
4439	/* Save wakeup reason register for later use */
4440	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4441
4442	/* check for a power management event */
4443	ice_print_wake_reason(pf);
4444
4445	/* clear wake status, all bits */
4446	wr32(hw, PFPM_WUS, U32_MAX);
4447
4448	/* Disable WoL at init, wait for user to enable */
4449	device_set_wakeup_enable(dev, false);
4450
4451	if (ice_is_safe_mode(pf)) {
4452		ice_set_safe_mode_vlan_cfg(pf);
4453		goto probe_done;
4454	}
4455
4456	/* initialize DDP driven features */
4457	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4458		ice_ptp_init(pf);
4459
4460	/* Note: Flow director init failure is non-fatal to load */
4461	if (ice_init_fdir(pf))
4462		dev_err(dev, "could not initialize flow director\n");
4463
4464	/* Note: DCB init failure is non-fatal to load */
4465	if (ice_init_pf_dcb(pf, false)) {
4466		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4467		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4468	} else {
4469		ice_cfg_lldp_mib_change(&pf->hw, true);
4470	}
4471
4472	if (ice_init_lag(pf))
4473		dev_warn(dev, "Failed to init link aggregation support\n");
4474
4475	/* print PCI link speed and width */
4476	pcie_print_link_status(pf->pdev);
4477
4478probe_done:
4479	err = ice_register_netdev(pf);
4480	if (err)
4481		goto err_netdev_reg;
4482
4483	/* ready to go, so clear down state bit */
4484	clear_bit(ICE_DOWN, pf->state);
4485	if (ice_is_aux_ena(pf)) {
4486		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4487		if (pf->aux_idx < 0) {
4488			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4489			err = -ENOMEM;
4490			goto err_netdev_reg;
4491		}
4492
4493		err = ice_init_rdma(pf);
4494		if (err) {
4495			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4496			err = -EIO;
4497			goto err_init_aux_unroll;
4498		}
4499	} else {
4500		dev_warn(dev, "RDMA is not supported on this device\n");
4501	}
4502
4503	return 0;
4504
4505err_init_aux_unroll:
4506	pf->adev = NULL;
4507	ida_free(&ice_aux_ida, pf->aux_idx);
4508err_netdev_reg:
4509err_send_version_unroll:
4510	ice_vsi_release_all(pf);
4511err_alloc_sw_unroll:
4512	set_bit(ICE_SERVICE_DIS, pf->state);
4513	set_bit(ICE_DOWN, pf->state);
4514	devm_kfree(dev, pf->first_sw);
4515err_msix_misc_unroll:
4516	ice_free_irq_msix_misc(pf);
4517err_init_interrupt_unroll:
4518	ice_clear_interrupt_scheme(pf);
4519err_init_vsi_unroll:
4520	devm_kfree(dev, pf->vsi);
4521err_init_pf_unroll:
4522	ice_deinit_pf(pf);
4523	ice_devlink_destroy_regions(pf);
4524	ice_deinit_hw(hw);
4525err_exit_unroll:
4526	ice_devlink_unregister(pf);
4527	pci_disable_pcie_error_reporting(pdev);
4528	pci_disable_device(pdev);
4529	return err;
4530}
4531
4532/**
4533 * ice_set_wake - enable or disable Wake on LAN
4534 * @pf: pointer to the PF struct
4535 *
4536 * Simple helper for WoL control
4537 */
4538static void ice_set_wake(struct ice_pf *pf)
4539{
4540	struct ice_hw *hw = &pf->hw;
4541	bool wol = pf->wol_ena;
4542
4543	/* clear wake state, otherwise new wake events won't fire */
4544	wr32(hw, PFPM_WUS, U32_MAX);
4545
4546	/* enable / disable APM wake up, no RMW needed */
4547	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4548
4549	/* set magic packet filter enabled */
4550	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4551}
4552
4553/**
4554 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4555 * @pf: pointer to the PF struct
4556 *
4557 * Issue firmware command to enable multicast magic wake, making
4558 * sure that any locally administered address (LAA) is used for
4559 * wake, and that PF reset doesn't undo the LAA.
4560 */
4561static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4562{
4563	struct device *dev = ice_pf_to_dev(pf);
4564	struct ice_hw *hw = &pf->hw;
4565	enum ice_status status;
4566	u8 mac_addr[ETH_ALEN];
4567	struct ice_vsi *vsi;
4568	u8 flags;
4569
4570	if (!pf->wol_ena)
4571		return;
4572
4573	vsi = ice_get_main_vsi(pf);
4574	if (!vsi)
4575		return;
4576
4577	/* Get current MAC address in case it's an LAA */
4578	if (vsi->netdev)
4579		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4580	else
4581		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4582
4583	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4584		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4585		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4586
4587	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4588	if (status)
4589		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4590			ice_stat_str(status),
4591			ice_aq_str(hw->adminq.sq_last_status));
4592}
4593
4594/**
4595 * ice_remove - Device removal routine
4596 * @pdev: PCI device information struct
4597 */
4598static void ice_remove(struct pci_dev *pdev)
4599{
4600	struct ice_pf *pf = pci_get_drvdata(pdev);
4601	int i;
4602
4603	if (!pf)
4604		return;
4605
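	/* wait for any in-progress reset to finish before starting teardown */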
4606	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4607		if (!ice_is_reset_in_progress(pf->state))
4608			break;
4609		msleep(100);
4610	}
4611
4612	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4613		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4614		ice_free_vfs(pf);
4615	}
4616
4617	ice_service_task_stop(pf);
4618
4619	ice_aq_cancel_waiting_tasks(pf);
4620	ice_unplug_aux_dev(pf);
4621	if (pf->aux_idx >= 0)
4622		ida_free(&ice_aux_ida, pf->aux_idx);
4623	set_bit(ICE_DOWN, pf->state);
4624
4625	mutex_destroy(&pf->hw.fdir_fltr_lock);
4626	ice_deinit_lag(pf);
4627	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4628		ice_ptp_release(pf);
4629	if (!ice_is_safe_mode(pf))
4630		ice_remove_arfs(pf);
4631	ice_setup_mc_magic_wake(pf);
4632	ice_vsi_release_all(pf);
4633	ice_set_wake(pf);
4634	ice_free_irq_msix_misc(pf);
4635	ice_for_each_vsi(pf, i) {
4636		if (!pf->vsi[i])
4637			continue;
4638		ice_vsi_free_q_vectors(pf->vsi[i]);
4639	}
4640	ice_deinit_pf(pf);
4641	ice_devlink_destroy_regions(pf);
4642	ice_deinit_hw(&pf->hw);
4643	ice_devlink_unregister(pf);
4644
4645	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4646	 * do it via ice_schedule_reset() since there is no need to rebuild
4647	 * and the service task is already stopped.
4648	 */
4649	ice_reset(&pf->hw, ICE_RESET_PFR);
4650	pci_wait_for_pending_transaction(pdev);
4651	ice_clear_interrupt_scheme(pf);
4652	pci_disable_pcie_error_reporting(pdev);
4653	pci_disable_device(pdev);
4654}
4655
4656/**
4657 * ice_shutdown - PCI callback for shutting down device
4658 * @pdev: PCI device information struct
4659 */
4660static void ice_shutdown(struct pci_dev *pdev)
4661{
4662	struct ice_pf *pf = pci_get_drvdata(pdev);
4663
4664	ice_remove(pdev);
4665
4666	if (system_state == SYSTEM_POWER_OFF) {
4667		pci_wake_from_d3(pdev, pf->wol_ena);
4668		pci_set_power_state(pdev, PCI_D3hot);
4669	}
4670}
4671
4672#ifdef CONFIG_PM
4673/**
4674 * ice_prepare_for_shutdown - prep for PCI shutdown
4675 * @pf: board private structure
4676 *
4677 * Inform or close all dependent features in prep for PCI device shutdown
4678 */
4679static void ice_prepare_for_shutdown(struct ice_pf *pf)
4680{
4681	struct ice_hw *hw = &pf->hw;
4682	u32 v;
4683
4684	/* Notify VFs of impending reset */
4685	if (ice_check_sq_alive(hw, &hw->mailboxq))
4686		ice_vc_notify_reset(pf);
4687
4688	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4689
4690	/* disable the VSIs and their queues that are not already DOWN */
4691	ice_pf_dis_all_vsi(pf, false);
4692
4693	ice_for_each_vsi(pf, v)
4694		if (pf->vsi[v])
4695			pf->vsi[v]->vsi_num = 0;
4696
4697	ice_shutdown_all_ctrlq(hw);
4698}
4699
4700/**
4701 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4702 * @pf: board private structure to reinitialize
4703 *
4704 * This routine reinitializes the interrupt scheme that was cleared during
4705 * the power management suspend callback.
4706 *
4707 * This should be called during resume routine to re-allocate the q_vectors
4708 * and reacquire interrupts.
4709 */
4710static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4711{
4712	struct device *dev = ice_pf_to_dev(pf);
4713	int ret, v;
4714
4715	/* Since we clear the MSIX flag during suspend, we need to
4716	 * set it back during resume...
4717	 */
4718
4719	ret = ice_init_interrupt_scheme(pf);
4720	if (ret) {
4721		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4722		return ret;
4723	}
4724
4725	/* Remap vectors and rings, after successful re-init interrupts */
4726	ice_for_each_vsi(pf, v) {
4727		if (!pf->vsi[v])
4728			continue;
4729
4730		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4731		if (ret)
4732			goto err_reinit;
4733		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4734	}
4735
4736	ret = ice_req_irq_msix_misc(pf);
4737	if (ret) {
4738		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4739			ret);
4740		goto err_reinit;
4741	}
4742
4743	return 0;
4744
4745err_reinit:
4746	while (v--)
4747		if (pf->vsi[v])
4748			ice_vsi_free_q_vectors(pf->vsi[v]);
4749
4750	return ret;
4751}
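
/* The error path above uses the classic partial-unwind idiom: when setup
 * fails at index v, "while (v--)" walks back over exactly the entries that
 * were successfully set up. A standalone sketch of the pattern, with
 * hypothetical alloc_one()/free_one() helpers (not driver APIs):
 *
 *	for (v = 0; v < n; v++) {
 *		ret = alloc_one(v);
 *		if (ret)
 *			goto unwind;
 *	}
 *	return 0;
 * unwind:
 *	while (v--)
 *		free_one(v);
 *	return ret;
 */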
4752
4753/**
4754 * ice_suspend
4755 * @dev: generic device information structure
4756 *
4757 * Power Management callback to quiesce the device and prepare
4758 * for D3 transition.
4759 */
4760static int __maybe_unused ice_suspend(struct device *dev)
4761{
4762	struct pci_dev *pdev = to_pci_dev(dev);
4763	struct ice_pf *pf;
4764	int disabled, v;
4765
4766	pf = pci_get_drvdata(pdev);
4767
4768	if (!ice_pf_state_is_nominal(pf)) {
4769		dev_err(dev, "Device is not ready, no need to suspend it\n");
4770		return -EBUSY;
4771	}
4772
4773	/* Stop watchdog tasks until resume completion.
4774	 * Even though it is most likely that the service task is
4775	 * disabled if the device is suspended or down, the service task's
4776	 * state is controlled by a different state bit, and we should
4777	 * store and honor whatever state that bit is in at this point.
4778	 */
4779	disabled = ice_service_task_stop(pf);
4780
4781	ice_unplug_aux_dev(pf);
4782
4783	/* Already suspended? Then there is nothing to do */
4784	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4785		if (!disabled)
4786			ice_service_task_restart(pf);
4787		return 0;
4788	}
4789
4790	if (test_bit(ICE_DOWN, pf->state) ||
4791	    ice_is_reset_in_progress(pf->state)) {
4792		dev_err(dev, "can't suspend device in reset or already down\n");
4793		if (!disabled)
4794			ice_service_task_restart(pf);
4795		return 0;
4796	}
4797
4798	ice_setup_mc_magic_wake(pf);
4799
4800	ice_prepare_for_shutdown(pf);
4801
4802	ice_set_wake(pf);
4803
4804	/* Free vectors, clear the interrupt scheme and release IRQs
4805	 * for proper hibernation, especially with large number of CPUs.
4806	 * Otherwise hibernation might fail when mapping all the vectors back
4807	 * to CPU0.
4808	 */
4809	ice_free_irq_msix_misc(pf);
4810	ice_for_each_vsi(pf, v) {
4811		if (!pf->vsi[v])
4812			continue;
4813		ice_vsi_free_q_vectors(pf->vsi[v]);
4814	}
4815	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4816	ice_clear_interrupt_scheme(pf);
4817
4818	pci_save_state(pdev);
4819	pci_wake_from_d3(pdev, pf->wol_ena);
4820	pci_set_power_state(pdev, PCI_D3hot);
4821	return 0;
4822}
4823
4824/**
4825 * ice_resume - PM callback for waking up from D3
4826 * @dev: generic device information structure
4827 */
4828static int __maybe_unused ice_resume(struct device *dev)
4829{
4830	struct pci_dev *pdev = to_pci_dev(dev);
4831	enum ice_reset_req reset_type;
4832	struct ice_pf *pf;
4833	struct ice_hw *hw;
4834	int ret;
4835
4836	pci_set_power_state(pdev, PCI_D0);
4837	pci_restore_state(pdev);
4838	pci_save_state(pdev);
4839
4840	if (!pci_device_is_present(pdev))
4841		return -ENODEV;
4842
4843	ret = pci_enable_device_mem(pdev);
4844	if (ret) {
4845		dev_err(dev, "Cannot enable device after suspend\n");
4846		return ret;
4847	}
4848
4849	pf = pci_get_drvdata(pdev);
4850	hw = &pf->hw;
4851
4852	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4853	ice_print_wake_reason(pf);
4854
4855	/* We cleared the interrupt scheme when we suspended, so we need to
4856	 * restore it now to resume device functionality.
4857	 */
4858	ret = ice_reinit_interrupt_scheme(pf);
4859	if (ret)
4860		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4861
4862	clear_bit(ICE_DOWN, pf->state);
4863	/* Now perform PF reset and rebuild */
4864	reset_type = ICE_RESET_PFR;
4865	/* re-enable service task for reset, but allow reset to schedule it */
4866	clear_bit(ICE_SERVICE_DIS, pf->state);
4867
4868	if (ice_schedule_reset(pf, reset_type))
4869		dev_err(dev, "Reset during resume failed.\n");
4870
4871	clear_bit(ICE_SUSPENDED, pf->state);
4872	ice_service_task_restart(pf);
4873
4874	/* Restart the service task */
4875	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4876
4877	return 0;
4878}
4879#endif /* CONFIG_PM */
4880
4881/**
4882 * ice_pci_err_detected - warning that PCI error has been detected
4883 * @pdev: PCI device information struct
4884 * @err: the type of PCI error
4885 *
4886 * Called to warn that something happened on the PCI bus and the error handling
4887 * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4888 */
4889static pci_ers_result_t
4890ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4891{
4892	struct ice_pf *pf = pci_get_drvdata(pdev);
4893
4894	if (!pf) {
4895		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4896			__func__, err);
4897		return PCI_ERS_RESULT_DISCONNECT;
4898	}
4899
4900	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4901		ice_service_task_stop(pf);
4902
4903		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4904			set_bit(ICE_PFR_REQ, pf->state);
4905			ice_prepare_for_reset(pf);
4906		}
4907	}
4908
4909	return PCI_ERS_RESULT_NEED_RESET;
4910}
4911
4912/**
4913 * ice_pci_err_slot_reset - a PCI slot reset has just happened
4914 * @pdev: PCI device information struct
4915 *
4916 * Called to determine if the driver can recover from the PCI slot reset,
4917 * using a register read to check whether the device is responsive.
4918 */
4919static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4920{
4921	struct ice_pf *pf = pci_get_drvdata(pdev);
4922	pci_ers_result_t result;
4923	int err;
4924	u32 reg;
4925
4926	err = pci_enable_device_mem(pdev);
4927	if (err) {
4928		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4929			err);
4930		result = PCI_ERS_RESULT_DISCONNECT;
4931	} else {
4932		pci_set_master(pdev);
4933		pci_restore_state(pdev);
4934		pci_save_state(pdev);
4935		pci_wake_from_d3(pdev, false);
4936
4937		/* Check for life */
4938		reg = rd32(&pf->hw, GLGEN_RTRIG);
4939		if (!reg)
4940			result = PCI_ERS_RESULT_RECOVERED;
4941		else
4942			result = PCI_ERS_RESULT_DISCONNECT;
4943	}
4944
4945	err = pci_aer_clear_nonfatal_status(pdev);
4946	if (err)
4947		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4948			err);
4949	/* non-fatal, continue */
4950
4951	return result;
4952}
4953
4954/**
4955 * ice_pci_err_resume - restart operations after PCI error recovery
4956 * @pdev: PCI device information struct
4957 *
4958 * Called to allow the driver to bring things back up after PCI error and/or
4959 * reset recovery have finished
4960 */
4961static void ice_pci_err_resume(struct pci_dev *pdev)
4962{
4963	struct ice_pf *pf = pci_get_drvdata(pdev);
4964
4965	if (!pf) {
4966		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4967			__func__);
4968		return;
4969	}
4970
4971	if (test_bit(ICE_SUSPENDED, pf->state)) {
4972		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4973			__func__);
4974		return;
4975	}
4976
4977	ice_restore_all_vfs_msi_state(pdev);
4978
4979	ice_do_reset(pf, ICE_RESET_PFR);
4980	ice_service_task_restart(pf);
4981	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4982}
4983
4984/**
4985 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4986 * @pdev: PCI device information struct
4987 */
4988static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4989{
4990	struct ice_pf *pf = pci_get_drvdata(pdev);
4991
4992	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4993		ice_service_task_stop(pf);
4994
4995		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4996			set_bit(ICE_PFR_REQ, pf->state);
4997			ice_prepare_for_reset(pf);
4998		}
4999	}
5000}
5001
5002/**
5003 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5004 * @pdev: PCI device information struct
5005 */
5006static void ice_pci_err_reset_done(struct pci_dev *pdev)
5007{
5008	ice_pci_err_resume(pdev);
5009}
5010
5011/* ice_pci_tbl - PCI Device ID Table
5012 *
5013 * Wildcard entries (PCI_ANY_ID) should come last
5014 * Last entry must be all 0s
5015 *
5016 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5017 *   Class, Class Mask, private data (not used) }
5018 */
5019static const struct pci_device_id ice_pci_tbl[] = {
5020	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5021	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5022	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5023	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5024	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5025	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5026	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5027	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5028	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5029	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5030	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5031	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5032	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5033	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5034	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5035	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5036	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5037	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5038	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5039	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5040	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5041	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5042	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5043	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5044	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5045	/* required last entry */
5046	{ 0, }
5047};
5048MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
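
/* For reference: each PCI_VDEVICE(INTEL, id) entry above expands roughly to
 *
 *	{ .vendor = PCI_VENDOR_ID_INTEL, .device = (id),
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }
 *
 * so a listed device ID matches regardless of its subsystem IDs, and
 * MODULE_DEVICE_TABLE() exports the table as modalias data so userspace can
 * autoload this module when a matching device is enumerated.
 */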
5049
5050static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5051
5052static const struct pci_error_handlers ice_pci_err_handler = {
5053	.error_detected = ice_pci_err_detected,
5054	.slot_reset = ice_pci_err_slot_reset,
5055	.reset_prepare = ice_pci_err_reset_prepare,
5056	.reset_done = ice_pci_err_reset_done,
5057	.resume = ice_pci_err_resume
5058};
5059
5060static struct pci_driver ice_driver = {
5061	.name = KBUILD_MODNAME,
5062	.id_table = ice_pci_tbl,
5063	.probe = ice_probe,
5064	.remove = ice_remove,
5065#ifdef CONFIG_PM
5066	.driver.pm = &ice_pm_ops,
5067#endif /* CONFIG_PM */
5068	.shutdown = ice_shutdown,
5069	.sriov_configure = ice_sriov_configure,
5070	.err_handler = &ice_pci_err_handler
5071};
5072
5073/**
5074 * ice_module_init - Driver registration routine
5075 *
5076 * ice_module_init is the first routine called when the driver is
5077 * loaded. All it does is register with the PCI subsystem.
5078 */
5079static int __init ice_module_init(void)
5080{
5081	int status;
5082
5083	pr_info("%s\n", ice_driver_string);
5084	pr_info("%s\n", ice_copyright);
5085
5086	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5087	if (!ice_wq) {
5088		pr_err("Failed to create workqueue\n");
5089		return -ENOMEM;
5090	}
5091
5092	status = pci_register_driver(&ice_driver);
5093	if (status) {
5094		pr_err("failed to register PCI driver, err %d\n", status);
5095		destroy_workqueue(ice_wq);
5096	}
5097
5098	return status;
5099}
5100module_init(ice_module_init);
5101
5102/**
5103 * ice_module_exit - Driver exit cleanup routine
5104 *
5105 * ice_module_exit is called just before the driver is removed
5106 * from memory.
5107 */
5108static void __exit ice_module_exit(void)
5109{
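	/* unregister first so no probe/remove callback can still be running,
	 * then it is safe to destroy the workqueue those callbacks used
	 */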
5110	pci_unregister_driver(&ice_driver);
5111	destroy_workqueue(ice_wq);
5112	pr_info("module unloaded\n");
5113}
5114module_exit(ice_module_exit);
5115
5116/**
5117 * ice_set_mac_address - NDO callback to set MAC address
5118 * @netdev: network interface device structure
5119 * @pi: pointer to an address structure
5120 *
5121 * Returns 0 on success, negative on failure
5122 */
5123static int ice_set_mac_address(struct net_device *netdev, void *pi)
5124{
5125	struct ice_netdev_priv *np = netdev_priv(netdev);
5126	struct ice_vsi *vsi = np->vsi;
5127	struct ice_pf *pf = vsi->back;
5128	struct ice_hw *hw = &pf->hw;
5129	struct sockaddr *addr = pi;
5130	enum ice_status status;
5131	u8 old_mac[ETH_ALEN];
5132	u8 flags = 0;
5133	int err = 0;
5134	u8 *mac;
5135
5136	mac = (u8 *)addr->sa_data;
5137
5138	if (!is_valid_ether_addr(mac))
5139		return -EADDRNOTAVAIL;
5140
5141	if (ether_addr_equal(netdev->dev_addr, mac)) {
5142		netdev_dbg(netdev, "already using mac %pM\n", mac);
5143		return 0;
5144	}
5145
5146	if (test_bit(ICE_DOWN, pf->state) ||
5147	    ice_is_reset_in_progress(pf->state)) {
5148		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5149			   mac);
5150		return -EBUSY;
5151	}
5152
5153	netif_addr_lock_bh(netdev);
5154	ether_addr_copy(old_mac, netdev->dev_addr);
5155	/* change the netdev's MAC address */
5156	memcpy(netdev->dev_addr, mac, netdev->addr_len);
5157	netif_addr_unlock_bh(netdev);
5158
5159	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5160	status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5161	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5162		err = -EADDRNOTAVAIL;
5163		goto err_update_filters;
5164	}
5165
5166	/* Add filter for new MAC. If filter exists, return success */
5167	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5168	if (status == ICE_ERR_ALREADY_EXISTS)
5169		/* Although this MAC filter is already present in hardware it's
5170		 * possible in some cases (e.g. bonding) that dev_addr was
5171		 * modified outside of the driver and needs to be restored back
5172		 * to this value.
5173		 */
5174		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5175	else if (status)
5176		/* error if the new filter addition failed */
5177		err = -EADDRNOTAVAIL;
5178
5179err_update_filters:
5180	if (err) {
5181		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5182			   mac);
5183		netif_addr_lock_bh(netdev);
5184		ether_addr_copy(netdev->dev_addr, old_mac);
5185		netif_addr_unlock_bh(netdev);
5186		return err;
5187	}
5188
5189	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5190		   netdev->dev_addr);
5191
5192	/* write new MAC address to the firmware */
5193	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5194	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5195	if (status) {
5196		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5197			   mac, ice_stat_str(status));
5198	}
5199	return 0;
5200}
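
/* Example (hypothetical interface name and address): this NDO is reached
 * through the standard rtnetlink path, e.g.
 *
 *	ip link set dev eth0 address 00:11:22:33:44:55
 *
 * On filter-update failure the function above restores old_mac, so the
 * netdev address and the hardware filters never stay out of sync.
 */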
5201
5202/**
5203 * ice_set_rx_mode - NDO callback to set the netdev filters
5204 * @netdev: network interface device structure
5205 */
5206static void ice_set_rx_mode(struct net_device *netdev)
5207{
5208	struct ice_netdev_priv *np = netdev_priv(netdev);
5209	struct ice_vsi *vsi = np->vsi;
5210
5211	if (!vsi)
5212		return;
5213
5214	/* Set the flags to synchronize filters
5215	 * ndo_set_rx_mode may be triggered even without a change in netdev
5216	 * flags
5217	 */
5218	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5219	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5220	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5221
5222	/* schedule our worker thread which will take care of
5223	 * applying the new filter changes
5224	 */
5225	ice_service_task_schedule(vsi->back);
5226}
5227
5228/**
5229 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5230 * @netdev: network interface device structure
5231 * @queue_index: Queue ID
5232 * @maxrate: maximum bandwidth in Mbps
5233 */
5234static int
5235ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5236{
5237	struct ice_netdev_priv *np = netdev_priv(netdev);
5238	struct ice_vsi *vsi = np->vsi;
5239	enum ice_status status;
5240	u16 q_handle;
5241	u8 tc;
5242
5243	/* Validate maxrate requested is within permitted range */
5244	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5245		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5246			   maxrate, queue_index);
5247		return -EINVAL;
5248	}
5249
5250	q_handle = vsi->tx_rings[queue_index]->q_handle;
5251	tc = ice_dcb_get_tc(vsi, queue_index);
5252
5253	/* Set BW back to default when the user sets maxrate to 0 */
5254	if (!maxrate)
5255		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5256					       q_handle, ICE_MAX_BW);
5257	else
5258		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5259					  q_handle, ICE_MAX_BW, maxrate * 1000);
5260	if (status) {
5261		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5262			   ice_stat_str(status));
5263		return -EIO;
5264	}
5265
5266	return 0;
5267}
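
/* Example (hypothetical interface/queue names): the stack calls this NDO when
 * a Mbps value is written to the per-queue sysfs attribute, e.g.
 *
 *	echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * and writing 0 restores the default (unlimited) queue bandwidth.
 */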
5268
5269/**
5270 * ice_fdb_add - add an entry to the hardware database
5271 * @ndm: the input from the stack
5272 * @tb: pointer to array of nladdr (unused)
5273 * @dev: the net device pointer
5274 * @addr: the MAC address entry being added
5275 * @vid: VLAN ID
5276 * @flags: instructions from stack about fdb operation
5277 * @extack: netlink extended ack
5278 */
5279static int
5280ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5281	    struct net_device *dev, const unsigned char *addr, u16 vid,
5282	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5283{
5284	int err;
5285
5286	if (vid) {
5287		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5288		return -EINVAL;
5289	}
5290	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5291		netdev_err(dev, "FDB only supports static addresses\n");
5292		return -EINVAL;
5293	}
5294
5295	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5296		err = dev_uc_add_excl(dev, addr);
5297	else if (is_multicast_ether_addr(addr))
5298		err = dev_mc_add_excl(dev, addr);
5299	else
5300		err = -EINVAL;
5301
5302	/* Only return duplicate errors if NLM_F_EXCL is set */
5303	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5304		err = 0;
5305
5306	return err;
5307}
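
/* Example (hypothetical interface name): these FDB ops back the iproute2
 * bridge tool; a command such as
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 self permanent
 *
 * reaches ice_fdb_add() via the ndo_fdb_add callback ("self" targets the
 * device's own FDB rather than an upper bridge device).
 */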
5308
5309/**
5310 * ice_fdb_del - delete an entry from the hardware database
5311 * @ndm: the input from the stack
5312 * @tb: pointer to array of nladdr (unused)
5313 * @dev: the net device pointer
5314 * @addr: the MAC address entry being added
5315 * @vid: VLAN ID
5316 */
5317static int
5318ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5319	    struct net_device *dev, const unsigned char *addr,
5320	    __always_unused u16 vid)
5321{
5322	int err;
5323
5324	if (ndm->ndm_state & NUD_PERMANENT) {
5325		netdev_err(dev, "FDB only supports static addresses\n");
5326		return -EINVAL;
5327	}
5328
5329	if (is_unicast_ether_addr(addr))
5330		err = dev_uc_del(dev, addr);
5331	else if (is_multicast_ether_addr(addr))
5332		err = dev_mc_del(dev, addr);
5333	else
5334		err = -EINVAL;
5335
5336	return err;
5337}
5338
5339/**
5340 * ice_set_features - set the netdev feature flags
5341 * @netdev: ptr to the netdev being adjusted
5342 * @features: the feature set that the stack is suggesting
5343 */
5344static int
5345ice_set_features(struct net_device *netdev, netdev_features_t features)
5346{
5347	struct ice_netdev_priv *np = netdev_priv(netdev);
5348	struct ice_vsi *vsi = np->vsi;
5349	struct ice_pf *pf = vsi->back;
5350	int ret = 0;
5351
5352	/* Don't set any netdev advanced features with device in Safe Mode */
5353	if (ice_is_safe_mode(vsi->back)) {
5354		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5355		return ret;
5356	}
5357
5358	/* Do not change settings during reset */
5359	if (ice_is_reset_in_progress(pf->state)) {
5360		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features is temporarily unavailable.\n");
5361		return -EBUSY;
5362	}
5363
5364	/* Multiple features can be changed in one call so keep features in
5365	 * separate if/else statements to guarantee each feature is checked
5366	 */
5367	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5368		ice_vsi_manage_rss_lut(vsi, true);
5369	else if (!(features & NETIF_F_RXHASH) &&
5370		 netdev->features & NETIF_F_RXHASH)
5371		ice_vsi_manage_rss_lut(vsi, false);
5372
5373	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5374	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5375		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5376	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5377		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5378		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5379
5380	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5381	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5382		ret = ice_vsi_manage_vlan_insertion(vsi);
5383	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5384		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5385		ret = ice_vsi_manage_vlan_insertion(vsi);
5386
5387	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5388	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5389		ret = ice_cfg_vlan_pruning(vsi, true, false);
5390	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5391		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5392		ret = ice_cfg_vlan_pruning(vsi, false, false);
5393
5394	if ((features & NETIF_F_NTUPLE) &&
5395	    !(netdev->features & NETIF_F_NTUPLE)) {
5396		ice_vsi_manage_fdir(vsi, true);
5397		ice_init_arfs(vsi);
5398	} else if (!(features & NETIF_F_NTUPLE) &&
5399		 (netdev->features & NETIF_F_NTUPLE)) {
5400		ice_vsi_manage_fdir(vsi, false);
5401		ice_clear_arfs(vsi);
5402	}
5403
5404	return ret;
5405}
5406
5407/**
5408 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5409 * @vsi: VSI to setup VLAN properties for
5410 */
5411static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5412{
5413	int ret = 0;
5414
5415	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5416		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5417	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5418		ret = ice_vsi_manage_vlan_insertion(vsi);
5419
5420	return ret;
5421}
5422
5423/**
5424 * ice_vsi_cfg - Setup the VSI
5425 * @vsi: the VSI being configured
5426 *
5427 * Return 0 on success and negative value on error
5428 */
5429int ice_vsi_cfg(struct ice_vsi *vsi)
5430{
5431	int err;
5432
5433	if (vsi->netdev) {
5434		ice_set_rx_mode(vsi->netdev);
5435
5436		err = ice_vsi_vlan_setup(vsi);
5437
5438		if (err)
5439			return err;
5440	}
5441	ice_vsi_cfg_dcb_rings(vsi);
5442
5443	err = ice_vsi_cfg_lan_txqs(vsi);
5444	if (!err && ice_is_xdp_ena_vsi(vsi))
5445		err = ice_vsi_cfg_xdp_txqs(vsi);
5446	if (!err)
5447		err = ice_vsi_cfg_rxqs(vsi);
5448
5449	return err;
5450}
5451
5452/* THEORY OF MODERATION:
5453 * The below code creates custom DIM profiles for use by this driver, because
5454 * the ice hardware works differently from the hardware that DIMLIB was
5455 * originally made for. ice hardware doesn't have packet count limits that
5456 * can trigger an interrupt, but it *does* have interrupt rate limit support,
5457 * and this code adds that capability to be used by the driver when it's using
5458 * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
5459 * for how to "respond" to traffic and interrupts, so this driver uses a
5460 * slightly different set of moderation parameters to get best performance.
5461 */
5462struct ice_dim {
5463	/* the throttle rate for interrupts, basically the worst-case delay before
5464	 * an initial interrupt fires; value is stored in microseconds.
5465	 */
5466	u16 itr;
5467	/* the rate limit for interrupts, which can cap a delay from a small
5468	 * ITR at a certain number of interrupts per second, e.g. a 2us ITR
5469	 * could yield as much as 500,000 interrupts per second, but with a
5470	 * 10us rate limit, it limits to 100,000 interrupts per second. Value
5471	 * is stored in microseconds.
5472	 */
5473	u16 intrl;
5474};
5475
5476/* Make a different profile for Rx that doesn't allow quite so aggressive
5477 * moderation at the high end (it maxes out at 128us or about 8k interrupts a
5478 * second). The INTRL/rate parameters here are only useful to cap small ITR
5479 * values, which is why for larger ITRs - like 128, which can only generate
5480 * 8k interrupts per second, there is no point in rate limiting and the values
5481 * are set to zero. The rate limit values do affect latency, and so must
5482 * be kept reasonably small so as not to impact latency-sensitive tests.
5483 */
5484static const struct ice_dim rx_profile[] = {
5485	{2, 10},
5486	{8, 16},
5487	{32, 0},
5488	{96, 0},
5489	{128, 0}
5490};
5491
5492/* The transmit profile, which has the same sorts of values
5493 * as the previous struct
5494 */
5495static const struct ice_dim tx_profile[] = {
5496	{2, 10},
5497	{8, 16},
5498	{64, 0},
5499	{128, 0},
5500	{256, 0}
5501};
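
/* Illustrative only, not part of the driver: both ITR and INTRL act as a
 * minimum gap between interrupts in microseconds, so the effective ceiling
 * for a profile entry comes from whichever is larger. For {2, 10} that is
 * 1000000 / 10 = 100,000 interrupts/s; for {128, 0} the ITR alone caps the
 * rate at 1000000 / 128 ~= 7812 interrupts/s. The helper name is
 * hypothetical.
 */
static inline u32 ice_dim_max_ints_per_sec(const struct ice_dim *entry)
{
	u16 gap_us = max(entry->itr, entry->intrl);

	return gap_us ? USEC_PER_SEC / gap_us : 0;
}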
5502
5503static void ice_tx_dim_work(struct work_struct *work)
5504{
5505	struct ice_ring_container *rc;
5506	struct ice_q_vector *q_vector;
5507	struct dim *dim;
5508	u16 itr, intrl;
5509
5510	dim = container_of(work, struct dim, work);
5511	rc = container_of(dim, struct ice_ring_container, dim);
5512	q_vector = container_of(rc, struct ice_q_vector, tx);
5513
5514	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5515		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5516
5517	/* look up the values in our local table */
5518	itr = tx_profile[dim->profile_ix].itr;
5519	intrl = tx_profile[dim->profile_ix].intrl;
5520
5521	ice_trace(tx_dim_work, q_vector, dim);
5522	ice_write_itr(rc, itr);
5523	ice_write_intrl(q_vector, intrl);
5524
5525	dim->state = DIM_START_MEASURE;
5526}
5527
5528static void ice_rx_dim_work(struct work_struct *work)
5529{
5530	struct ice_ring_container *rc;
5531	struct ice_q_vector *q_vector;
5532	struct dim *dim;
5533	u16 itr, intrl;
5534
5535	dim = container_of(work, struct dim, work);
5536	rc = container_of(dim, struct ice_ring_container, dim);
5537	q_vector = container_of(rc, struct ice_q_vector, rx);
5538
5539	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5540		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5541
5542	/* look up the values in our local table */
5543	itr = rx_profile[dim->profile_ix].itr;
5544	intrl = rx_profile[dim->profile_ix].intrl;
5545
5546	ice_trace(rx_dim_work, q_vector, dim);
5547	ice_write_itr(rc, itr);
5548	ice_write_intrl(q_vector, intrl);
5549
5550	dim->state = DIM_START_MEASURE;
5551}
5552
5553/**
5554 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5555 * @vsi: the VSI being configured
5556 */
5557static void ice_napi_enable_all(struct ice_vsi *vsi)
5558{
5559	int q_idx;
5560
5561	if (!vsi->netdev)
5562		return;
5563
5564	ice_for_each_q_vector(vsi, q_idx) {
5565		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5566
5567		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5568		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5569
5570		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5571		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5572
5573		if (q_vector->rx.ring || q_vector->tx.ring)
5574			napi_enable(&q_vector->napi);
5575	}
5576}
5577
5578/**
5579 * ice_up_complete - Finish the last steps of bringing up a connection
5580 * @vsi: The VSI being configured
5581 *
5582 * Return 0 on success and negative value on error
5583 */
5584static int ice_up_complete(struct ice_vsi *vsi)
5585{
5586	struct ice_pf *pf = vsi->back;
5587	int err;
5588
5589	ice_vsi_cfg_msix(vsi);
5590
5591	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5592	 * Tx queue group list was configured and the context bits were
5593	 * programmed using ice_vsi_cfg_txqs
5594	 */
5595	err = ice_vsi_start_all_rx_rings(vsi);
5596	if (err)
5597		return err;
5598
5599	clear_bit(ICE_VSI_DOWN, vsi->state);
5600	ice_napi_enable_all(vsi);
5601	ice_vsi_ena_irq(vsi);
5602
5603	if (vsi->port_info &&
5604	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5605	    vsi->netdev) {
5606		ice_print_link_msg(vsi, true);
5607		netif_tx_start_all_queues(vsi->netdev);
5608		netif_carrier_on(vsi->netdev);
5609	}
5610
5611	ice_service_task_schedule(pf);
5612
5613	return 0;
5614}
5615
5616/**
5617 * ice_up - Bring the connection back up after being down
5618 * @vsi: VSI being configured
5619 */
5620int ice_up(struct ice_vsi *vsi)
5621{
5622	int err;
5623
5624	err = ice_vsi_cfg(vsi);
5625	if (!err)
5626		err = ice_up_complete(vsi);
5627
5628	return err;
5629}
5630
5631/**
5632 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5633 * @ring: Tx or Rx ring to read stats from
5634 * @pkts: packets stats counter
5635 * @bytes: bytes stats counter
5636 *
5637 * This function fetches stats from the ring, handling the atomic operations
5638 * needed to read u64 values consistently on 32-bit machines.
5639 */
5640static void
5641ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5642{
5643	unsigned int start;
5644	*pkts = 0;
5645	*bytes = 0;
5646
5647	if (!ring)
5648		return;
5649	do {
5650		start = u64_stats_fetch_begin_irq(&ring->syncp);
5651		*pkts = ring->stats.pkts;
5652		*bytes = ring->stats.bytes;
5653	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5654}
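
/* For context (the writer side lives in the ring hot path, not in this file):
 * updates are bracketed with the u64_stats seqcount so the retry loop above
 * can detect a torn 64-bit read on 32-bit machines, roughly:
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.pkts += pkts;
 *	ring->stats.bytes += bytes;
 *	u64_stats_update_end(&ring->syncp);
 *
 * On 64-bit builds the seqcount compiles away and these are plain stores.
 */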
5655
5656/**
5657 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5658 * @vsi: the VSI to be updated
5659 * @rings: rings to work on
5660 * @count: number of rings
5661 */
5662static void
5663ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5664			     u16 count)
5665{
5666	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5667	u16 i;
5668
5669	for (i = 0; i < count; i++) {
5670		struct ice_ring *ring;
5671		u64 pkts, bytes;
5672
5673		ring = READ_ONCE(rings[i]);
5674		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5675		vsi_stats->tx_packets += pkts;
5676		vsi_stats->tx_bytes += bytes;
5677		vsi->tx_restart += ring->tx_stats.restart_q;
5678		vsi->tx_busy += ring->tx_stats.tx_busy;
5679		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5680	}
5681}
5682
5683/**
5684 * ice_update_vsi_ring_stats - Update VSI stats counters
5685 * @vsi: the VSI to be updated
5686 */
5687static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5688{
5689	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5690	u64 pkts, bytes;
5691	int i;
5692
5693	/* reset netdev stats */
5694	vsi_stats->tx_packets = 0;
5695	vsi_stats->tx_bytes = 0;
5696	vsi_stats->rx_packets = 0;
5697	vsi_stats->rx_bytes = 0;
5698
5699	/* reset non-netdev (extended) stats */
5700	vsi->tx_restart = 0;
5701	vsi->tx_busy = 0;
5702	vsi->tx_linearize = 0;
5703	vsi->rx_buf_failed = 0;
5704	vsi->rx_page_failed = 0;
5705
5706	rcu_read_lock();
5707
5708	/* update Tx rings counters */
5709	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5710
5711	/* update Rx rings counters */
5712	ice_for_each_rxq(vsi, i) {
5713		struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5714
5715		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5716		vsi_stats->rx_packets += pkts;
5717		vsi_stats->rx_bytes += bytes;
5718		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5719		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5720	}
5721
5722	/* update XDP Tx rings counters */
5723	if (ice_is_xdp_ena_vsi(vsi))
5724		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5725					     vsi->num_xdp_txq);
5726
5727	rcu_read_unlock();
5728}
5729
5730/**
5731 * ice_update_vsi_stats - Update VSI stats counters
5732 * @vsi: the VSI to be updated
5733 */
5734void ice_update_vsi_stats(struct ice_vsi *vsi)
5735{
5736	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5737	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5738	struct ice_pf *pf = vsi->back;
5739
5740	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5741	    test_bit(ICE_CFG_BUSY, pf->state))
5742		return;
5743
5744	/* get stats as recorded by Tx/Rx rings */
5745	ice_update_vsi_ring_stats(vsi);
5746
5747	/* get VSI stats as recorded by the hardware */
5748	ice_update_eth_stats(vsi);
5749
5750	cur_ns->tx_errors = cur_es->tx_errors;
5751	cur_ns->rx_dropped = cur_es->rx_discards;
5752	cur_ns->tx_dropped = cur_es->tx_discards;
5753	cur_ns->multicast = cur_es->rx_multicast;
5754
5755	/* update some more netdev stats if this is main VSI */
5756	if (vsi->type == ICE_VSI_PF) {
5757		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5758		cur_ns->rx_errors = pf->stats.crc_errors +
5759				    pf->stats.illegal_bytes +
5760				    pf->stats.rx_len_errors +
5761				    pf->stats.rx_undersize +
5762				    pf->hw_csum_rx_error +
5763				    pf->stats.rx_jabber +
5764				    pf->stats.rx_fragments +
5765				    pf->stats.rx_oversize;
5766		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5767		/* record drops from the port level */
5768		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5769	}
5770}
5771
5772/**
5773 * ice_update_pf_stats - Update PF port stats counters
5774 * @pf: PF whose stats need to be updated
5775 */
5776void ice_update_pf_stats(struct ice_pf *pf)
5777{
5778	struct ice_hw_port_stats *prev_ps, *cur_ps;
5779	struct ice_hw *hw = &pf->hw;
5780	u16 fd_ctr_base;
5781	u8 port;
5782
5783	port = hw->port_info->lport;
5784	prev_ps = &pf->stats_prev;
5785	cur_ps = &pf->stats;
5786
5787	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5788			  &prev_ps->eth.rx_bytes,
5789			  &cur_ps->eth.rx_bytes);
5790
5791	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5792			  &prev_ps->eth.rx_unicast,
5793			  &cur_ps->eth.rx_unicast);
5794
5795	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5796			  &prev_ps->eth.rx_multicast,
5797			  &cur_ps->eth.rx_multicast);
5798
5799	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5800			  &prev_ps->eth.rx_broadcast,
5801			  &cur_ps->eth.rx_broadcast);
5802
5803	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5804			  &prev_ps->eth.rx_discards,
5805			  &cur_ps->eth.rx_discards);
5806
5807	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5808			  &prev_ps->eth.tx_bytes,
5809			  &cur_ps->eth.tx_bytes);
5810
5811	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5812			  &prev_ps->eth.tx_unicast,
5813			  &cur_ps->eth.tx_unicast);
5814
5815	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5816			  &prev_ps->eth.tx_multicast,
5817			  &cur_ps->eth.tx_multicast);
5818
5819	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5820			  &prev_ps->eth.tx_broadcast,
5821			  &cur_ps->eth.tx_broadcast);
5822
5823	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5824			  &prev_ps->tx_dropped_link_down,
5825			  &cur_ps->tx_dropped_link_down);
5826
5827	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5828			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5829
5830	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5831			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5832
5833	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5834			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5835
5836	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5837			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5838
5839	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5840			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5841
5842	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5843			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5844
5845	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5846			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5847
5848	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5849			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5850
5851	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5852			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5853
5854	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5855			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5856
5857	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5858			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5859
5860	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5861			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5862
5863	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5864			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5865
5866	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5867			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5868
5869	fd_ctr_base = hw->fd_ctr_base;
5870
5871	ice_stat_update40(hw,
5872			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5873			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5874			  &cur_ps->fd_sb_match);
5875	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5876			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5877
5878	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5879			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5880
5881	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5882			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5883
5884	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5885			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5886
5887	ice_update_dcb_stats(pf);
5888
5889	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5890			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5891
5892	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5893			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5894
5895	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5896			  &prev_ps->mac_local_faults,
5897			  &cur_ps->mac_local_faults);
5898
5899	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5900			  &prev_ps->mac_remote_faults,
5901			  &cur_ps->mac_remote_faults);
5902
5903	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5904			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5905
5906	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5907			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5908
5909	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5910			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5911
5912	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5913			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5914
5915	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5916			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5917
5918	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5919
5920	pf->stat_prev_loaded = true;
5921}
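
/* Illustrative model of the ice_stat_update{32,40}() helpers used above
 * (a simplified 32-bit variant; the real helpers also handle 40-bit counters
 * split across two registers). Hardware counters are free-running and never
 * cleared, so each call reads the raw value and accumulates the delta since
 * the previous snapshot, with unsigned math absorbing a single wraparound.
 * The function name is hypothetical and this sketch is not part of the
 * driver.
 */
static inline void
ice_stat_update32_model(struct ice_hw *hw, u32 reg, bool prev_loaded,
			u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data = rd32(hw, reg);

	if (prev_loaded)
		*cur_stat += (u32)(new_data - (u32)*prev_stat);
	*prev_stat = new_data;
}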
5922
5923/**
5924 * ice_get_stats64 - get statistics for network device structure
5925 * @netdev: network interface device structure
5926 * @stats: main device statistics structure
5927 */
5928static
5929void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5930{
5931	struct ice_netdev_priv *np = netdev_priv(netdev);
5932	struct rtnl_link_stats64 *vsi_stats;
5933	struct ice_vsi *vsi = np->vsi;
5934
5935	vsi_stats = &vsi->net_stats;
5936
5937	if (!vsi->num_txq || !vsi->num_rxq)
5938		return;
5939
5940	/* netdev packet/byte stats come from ring counters. These are obtained
5941	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
5942	 * But, only call the update routine and read the registers if VSI is
5943	 * not down.
5944	 */
5945	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5946		ice_update_vsi_ring_stats(vsi);
5947	stats->tx_packets = vsi_stats->tx_packets;
5948	stats->tx_bytes = vsi_stats->tx_bytes;
5949	stats->rx_packets = vsi_stats->rx_packets;
5950	stats->rx_bytes = vsi_stats->rx_bytes;
5951
5952	/* The rest of the stats can be read from the hardware but instead we
5953	 * just return values that the watchdog task has already obtained from
5954	 * the hardware.
5955	 */
5956	stats->multicast = vsi_stats->multicast;
5957	stats->tx_errors = vsi_stats->tx_errors;
5958	stats->tx_dropped = vsi_stats->tx_dropped;
5959	stats->rx_errors = vsi_stats->rx_errors;
5960	stats->rx_dropped = vsi_stats->rx_dropped;
5961	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5962	stats->rx_length_errors = vsi_stats->rx_length_errors;
5963}
5964
5965/**
5966 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5967 * @vsi: VSI having NAPI disabled
5968 */
5969static void ice_napi_disable_all(struct ice_vsi *vsi)
5970{
5971	int q_idx;
5972
5973	if (!vsi->netdev)
5974		return;
5975
5976	ice_for_each_q_vector(vsi, q_idx) {
5977		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5978
5979		if (q_vector->rx.ring || q_vector->tx.ring)
5980			napi_disable(&q_vector->napi);
5981
5982		cancel_work_sync(&q_vector->tx.dim.work);
5983		cancel_work_sync(&q_vector->rx.dim.work);
5984	}
5985}
5986
5987/**
5988 * ice_down - Shutdown the connection
5989 * @vsi: The VSI being stopped
5990 */
5991int ice_down(struct ice_vsi *vsi)
5992{
5993	int i, tx_err, rx_err, link_err = 0;
5994
5995	/* Caller of this function is expected to set the
5996	 * vsi->state ICE_DOWN bit
5997	 */
5998	if (vsi->netdev) {
5999		netif_carrier_off(vsi->netdev);
6000		netif_tx_disable(vsi->netdev);
6001	}
6002
6003	ice_vsi_dis_irq(vsi);
6004
6005	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6006	if (tx_err)
6007		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6008			   vsi->vsi_num, tx_err);
6009	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6010		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6011		if (tx_err)
6012			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6013				   vsi->vsi_num, tx_err);
6014	}
6015
6016	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6017	if (rx_err)
6018		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6019			   vsi->vsi_num, rx_err);
6020
6021	ice_napi_disable_all(vsi);
6022
6023	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6024		link_err = ice_force_phys_link_state(vsi, false);
6025		if (link_err)
6026			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6027				   vsi->vsi_num, link_err);
6028	}
6029
6030	ice_for_each_txq(vsi, i)
6031		ice_clean_tx_ring(vsi->tx_rings[i]);
6032
6033	ice_for_each_rxq(vsi, i)
6034		ice_clean_rx_ring(vsi->rx_rings[i]);
6035
6036	if (tx_err || rx_err || link_err) {
6037		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6038			   vsi->vsi_num, vsi->vsw->sw_id);
6039		return -EIO;
6040	}
6041
6042	return 0;
6043}
6044
6045/**
6046 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6047 * @vsi: VSI having resources allocated
6048 *
6049 * Return 0 on success, negative on failure
6050 */
6051int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6052{
6053	int i, err = 0;
6054
6055	if (!vsi->num_txq) {
6056		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6057			vsi->vsi_num);
6058		return -EINVAL;
6059	}
6060
6061	ice_for_each_txq(vsi, i) {
6062		struct ice_ring *ring = vsi->tx_rings[i];
6063
6064		if (!ring)
6065			return -EINVAL;
6066
6067		ring->netdev = vsi->netdev;
6068		err = ice_setup_tx_ring(ring);
6069		if (err)
6070			break;
6071	}
6072
6073	return err;
6074}
6075
6076/**
6077 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6078 * @vsi: VSI having resources allocated
6079 *
6080 * Return 0 on success, negative on failure
6081 */
6082int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6083{
6084	int i, err = 0;
6085
6086	if (!vsi->num_rxq) {
6087		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6088			vsi->vsi_num);
6089		return -EINVAL;
6090	}
6091
6092	ice_for_each_rxq(vsi, i) {
6093		struct ice_ring *ring = vsi->rx_rings[i];
6094
6095		if (!ring)
6096			return -EINVAL;
6097
6098		ring->netdev = vsi->netdev;
6099		err = ice_setup_rx_ring(ring);
6100		if (err)
6101			break;
6102	}
6103
6104	return err;
6105}
6106
6107/**
6108 * ice_vsi_open_ctrl - open control VSI for use
6109 * @vsi: the VSI to open
6110 *
6111 * Initialization of the Control VSI
6112 *
6113 * Returns 0 on success, negative value on error
6114 */
6115int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6116{
6117	char int_name[ICE_INT_NAME_STR_LEN];
6118	struct ice_pf *pf = vsi->back;
6119	struct device *dev;
6120	int err;
6121
6122	dev = ice_pf_to_dev(pf);
6123	/* allocate descriptors */
6124	err = ice_vsi_setup_tx_rings(vsi);
6125	if (err)
6126		goto err_setup_tx;
6127
6128	err = ice_vsi_setup_rx_rings(vsi);
6129	if (err)
6130		goto err_setup_rx;
6131
6132	err = ice_vsi_cfg(vsi);
6133	if (err)
6134		goto err_setup_rx;
6135
6136	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6137		 dev_driver_string(dev), dev_name(dev));
6138	err = ice_vsi_req_irq_msix(vsi, int_name);
6139	if (err)
6140		goto err_setup_rx;
6141
6142	ice_vsi_cfg_msix(vsi);
6143
6144	err = ice_vsi_start_all_rx_rings(vsi);
6145	if (err)
6146		goto err_up_complete;
6147
6148	clear_bit(ICE_VSI_DOWN, vsi->state);
6149	ice_vsi_ena_irq(vsi);
6150
6151	return 0;
6152
6153err_up_complete:
6154	ice_down(vsi);
6155err_setup_rx:
6156	ice_vsi_free_rx_rings(vsi);
6157err_setup_tx:
6158	ice_vsi_free_tx_rings(vsi);
6159
6160	return err;
6161}
6162
6163/**
6164 * ice_vsi_open - Called when a network interface is made active
6165 * @vsi: the VSI to open
6166 *
6167 * Initialization of the VSI
6168 *
6169 * Returns 0 on success, negative value on error
6170 */
6171static int ice_vsi_open(struct ice_vsi *vsi)
6172{
6173	char int_name[ICE_INT_NAME_STR_LEN];
6174	struct ice_pf *pf = vsi->back;
6175	int err;
6176
6177	/* allocate descriptors */
6178	err = ice_vsi_setup_tx_rings(vsi);
6179	if (err)
6180		goto err_setup_tx;
6181
6182	err = ice_vsi_setup_rx_rings(vsi);
6183	if (err)
6184		goto err_setup_rx;
6185
6186	err = ice_vsi_cfg(vsi);
6187	if (err)
6188		goto err_setup_rx;
6189
6190	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6191		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6192	err = ice_vsi_req_irq_msix(vsi, int_name);
6193	if (err)
6194		goto err_setup_rx;
6195
6196	/* Notify the stack of the actual queue counts. */
6197	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6198	if (err)
6199		goto err_set_qs;
6200
6201	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6202	if (err)
6203		goto err_set_qs;
6204
6205	err = ice_up_complete(vsi);
6206	if (err)
6207		goto err_up_complete;
6208
6209	return 0;
6210
6211err_up_complete:
6212	ice_down(vsi);
6213err_set_qs:
6214	ice_vsi_free_irq(vsi);
6215err_setup_rx:
6216	ice_vsi_free_rx_rings(vsi);
6217err_setup_tx:
6218	ice_vsi_free_tx_rings(vsi);
6219
6220	return err;
6221}
6222
6223/**
6224 * ice_vsi_release_all - Delete all VSIs
6225 * @pf: PF from which all VSIs are being removed
6226 */
6227static void ice_vsi_release_all(struct ice_pf *pf)
6228{
6229	int err, i;
6230
6231	if (!pf->vsi)
6232		return;
6233
6234	ice_for_each_vsi(pf, i) {
6235		if (!pf->vsi[i])
6236			continue;
6237
6238		err = ice_vsi_release(pf->vsi[i]);
6239		if (err)
6240			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6241				i, err, pf->vsi[i]->vsi_num);
6242	}
6243}
6244
6245/**
6246 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6247 * @pf: pointer to the PF instance
6248 * @type: VSI type to rebuild
6249 *
6250 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6251 */
6252static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6253{
6254	struct device *dev = ice_pf_to_dev(pf);
6255	enum ice_status status;
6256	int i, err;
6257
6258	ice_for_each_vsi(pf, i) {
6259		struct ice_vsi *vsi = pf->vsi[i];
6260
6261		if (!vsi || vsi->type != type)
6262			continue;
6263
6264		/* rebuild the VSI */
6265		err = ice_vsi_rebuild(vsi, true);
6266		if (err) {
6267			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6268				err, vsi->idx, ice_vsi_type_str(type));
6269			return err;
6270		}
6271
6272		/* replay filters for the VSI */
6273		status = ice_replay_vsi(&pf->hw, vsi->idx);
6274		if (status) {
6275			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6276				ice_stat_str(status), vsi->idx,
6277				ice_vsi_type_str(type));
6278			return -EIO;
6279		}
6280
6281		/* Re-map HW VSI number, using VSI handle that has been
6282		 * previously validated in ice_replay_vsi() call above
6283		 */
6284		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6285
6286		/* enable the VSI */
6287		err = ice_ena_vsi(vsi, false);
6288		if (err) {
6289			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6290				err, vsi->idx, ice_vsi_type_str(type));
6291			return err;
6292		}
6293
6294		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6295			 ice_vsi_type_str(type));
6296	}
6297
6298	return 0;
6299}
6300
6301/**
6302 * ice_update_pf_netdev_link - Update PF netdev link status
6303 * @pf: pointer to the PF instance
6304 */
6305static void ice_update_pf_netdev_link(struct ice_pf *pf)
6306{
6307	bool link_up;
6308	int i;
6309
6310	ice_for_each_vsi(pf, i) {
6311		struct ice_vsi *vsi = pf->vsi[i];
6312
6313		if (!vsi || vsi->type != ICE_VSI_PF)
6314			return;
6315
6316		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6317		if (link_up) {
6318			netif_carrier_on(pf->vsi[i]->netdev);
6319			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6320		} else {
6321			netif_carrier_off(pf->vsi[i]->netdev);
6322			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6323		}
6324	}
6325}
6326
6327/**
6328 * ice_rebuild - rebuild after reset
6329 * @pf: PF to rebuild
6330 * @reset_type: type of reset
6331 *
6332 * Do not rebuild VF VSIs in this flow because that is already handled via
6333 * ice_reset_all_vfs(). The requirements for resetting a VF after a
6334 * PFR/CORER/GLOBR/etc. are different from the normal flow, and we don't want
6335 * to reset/rebuild all the VF VSIs twice.
6336 */
6337static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6338{
6339	struct device *dev = ice_pf_to_dev(pf);
6340	struct ice_hw *hw = &pf->hw;
6341	enum ice_status ret;
6342	int err;
6343
6344	if (test_bit(ICE_DOWN, pf->state))
6345		goto clear_recovery;
6346
6347	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6348
6349	ret = ice_init_all_ctrlq(hw);
6350	if (ret) {
6351		dev_err(dev, "control queues init failed %s\n",
6352			ice_stat_str(ret));
6353		goto err_init_ctrlq;
6354	}
6355
6356	/* if DDP was previously loaded successfully */
6357	if (!ice_is_safe_mode(pf)) {
6358		/* reload the SW DB of filter tables */
6359		if (reset_type == ICE_RESET_PFR)
6360			ice_fill_blk_tbls(hw);
6361		else
6362			/* Reload DDP Package after CORER/GLOBR reset */
6363			ice_load_pkg(NULL, pf);
6364	}
6365
6366	ret = ice_clear_pf_cfg(hw);
6367	if (ret) {
6368		dev_err(dev, "clear PF configuration failed %s\n",
6369			ice_stat_str(ret));
6370		goto err_init_ctrlq;
6371	}
6372
6373	if (pf->first_sw->dflt_vsi_ena)
6374		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6375	/* clear the default VSI configuration if it exists */
6376	pf->first_sw->dflt_vsi = NULL;
6377	pf->first_sw->dflt_vsi_ena = false;
6378
6379	ice_clear_pxe_mode(hw);
6380
6381	ret = ice_init_nvm(hw);
6382	if (ret) {
6383		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6384		goto err_init_ctrlq;
6385	}
6386
6387	ret = ice_get_caps(hw);
6388	if (ret) {
6389		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6390		goto err_init_ctrlq;
6391	}
6392
6393	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6394	if (ret) {
6395		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6396		goto err_init_ctrlq;
6397	}
6398
6399	err = ice_sched_init_port(hw->port_info);
6400	if (err)
6401		goto err_sched_init_port;
6402
6403	/* start misc vector */
6404	err = ice_req_irq_msix_misc(pf);
6405	if (err) {
6406		dev_err(dev, "misc vector setup failed: %d\n", err);
6407		goto err_sched_init_port;
6408	}
6409
6410	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6411		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6412		if (!rd32(hw, PFQF_FD_SIZE)) {
6413			u16 unused, guar, b_effort;
6414
6415			guar = hw->func_caps.fd_fltr_guar;
6416			b_effort = hw->func_caps.fd_fltr_best_effort;
6417
6418			/* force guaranteed filter pool for PF */
6419			ice_alloc_fd_guar_item(hw, &unused, guar);
6420			/* force shared filter pool for PF */
6421			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6422		}
6423	}
6424
6425	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6426		ice_dcb_rebuild(pf);
6427
6428	/* If the PF previously had PTP enabled, PTP init needs to happen before
6429	 * the VSI rebuild; otherwise the PTP link status events will
6430	 * fail.
6431	 */
6432	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6433		ice_ptp_init(pf);
6434
6435	/* rebuild PF VSI */
6436	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6437	if (err) {
6438		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6439		goto err_vsi_rebuild;
6440	}
6441
6442	/* If Flow Director is active */
6443	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6444		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6445		if (err) {
6446			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6447			goto err_vsi_rebuild;
6448		}
6449
6450		/* replay HW Flow Director recipes */
6451		if (hw->fdir_prof)
6452			ice_fdir_replay_flows(hw);
6453
6454		/* replay Flow Director filters */
6455		ice_fdir_replay_fltrs(pf);
6456
6457		ice_rebuild_arfs(pf);
6458	}
6459
6460	ice_update_pf_netdev_link(pf);
6461
6462	/* tell the firmware we are up */
6463	ret = ice_send_version(pf);
6464	if (ret) {
6465		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6466			ice_stat_str(ret));
6467		goto err_vsi_rebuild;
6468	}
6469
6470	ice_replay_post(hw);
6471
6472	/* if we get here, reset flow is successful */
6473	clear_bit(ICE_RESET_FAILED, pf->state);
6474
6475	ice_plug_aux_dev(pf);
6476	return;
6477
6478err_vsi_rebuild:
6479err_sched_init_port:
6480	ice_sched_cleanup_all(hw);
6481err_init_ctrlq:
6482	ice_shutdown_all_ctrlq(hw);
6483	set_bit(ICE_RESET_FAILED, pf->state);
6484clear_recovery:
6485	/* set this bit in PF state to control service task scheduling */
6486	set_bit(ICE_NEEDS_RESTART, pf->state);
6487	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6488}
6489
6490/**
6491 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6492 * @vsi: Pointer to VSI structure
6493 */
6494static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6495{
6496	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6497		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6498	else
6499		return ICE_RXBUF_3072;
6500}
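The arithmetic here is worth making concrete: the driver advertises the largest frame that still leaves room for XDP's reserved headroom, and ice_change_mtu() below subtracts the Ethernet framing overhead from it. A standalone sketch of the two cases, assuming the usual constant values (XDP_PACKET_HEADROOM = 256 and ICE_ETH_PKT_HDR_PAD = ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN = 26; the real driver takes these from its headers):

#include <stdio.h>

int main(void)
{
	int hdr_pad = 14 + 4 + 2 * 4;	/* assumed ICE_ETH_PKT_HDR_PAD */
	int frame_4k = 3072;		/* ICE_RXBUF_3072: 4K pages, no legacy Rx */
	int frame_legacy = 2048 - 256;	/* ICE_RXBUF_2048 - XDP_PACKET_HEADROOM */

	printf("max XDP MTU (4K pages):  %d\n", frame_4k - hdr_pad);	/* 3046 */
	printf("max XDP MTU (legacy Rx): %d\n", frame_legacy - hdr_pad);	/* 1766 */
	return 0;
}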
6501
6502/**
6503 * ice_change_mtu - NDO callback to change the MTU
6504 * @netdev: network interface device structure
6505 * @new_mtu: new value for maximum frame size
6506 *
6507 * Returns 0 on success, negative on failure
6508 */
6509static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6510{
6511	struct ice_netdev_priv *np = netdev_priv(netdev);
6512	struct ice_vsi *vsi = np->vsi;
6513	struct ice_pf *pf = vsi->back;
6514	struct iidc_event *event;
6515	u8 count = 0;
6516	int err = 0;
6517
6518	if (new_mtu == (int)netdev->mtu) {
6519		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6520		return 0;
6521	}
6522
6523	if (ice_is_xdp_ena_vsi(vsi)) {
6524		int frame_size = ice_max_xdp_frame_size(vsi);
6525
6526		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6527			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6528				   frame_size - ICE_ETH_PKT_HDR_PAD);
6529			return -EINVAL;
6530		}
6531	}
6532
6533	/* if a reset is in progress, wait for some time for it to complete */
6534	do {
6535		if (ice_is_reset_in_progress(pf->state)) {
6536			count++;
6537			usleep_range(1000, 2000);
6538		} else {
6539			break;
6540		}
6541
6542	} while (count < 100);
6543
6544	if (count == 100) {
6545		netdev_err(netdev, "can't change MTU. Device is busy\n");
6546		return -EBUSY;
6547	}
6548
6549	event = kzalloc(sizeof(*event), GFP_KERNEL);
6550	if (!event)
6551		return -ENOMEM;
6552
6553	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6554	ice_send_event_to_aux(pf, event);
6555	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6556
6557	netdev->mtu = (unsigned int)new_mtu;
6558
6559	/* if VSI is up, bring it down and then back up */
6560	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6561		err = ice_down(vsi);
6562		if (err) {
6563			netdev_err(netdev, "change MTU if_down err %d\n", err);
6564			goto event_after;
6565		}
6566
6567		err = ice_up(vsi);
6568		if (err) {
6569			netdev_err(netdev, "change MTU if_up err %d\n", err);
6570			goto event_after;
6571		}
6572	}
6573
6574	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6575event_after:
6576	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6577	ice_send_event_to_aux(pf, event);
6578	kfree(event);
6579
6580	return err;
6581}
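For reference, this NDO is what userspace reaches through the classic SIOCSIFMTU ioctl (iproute2's "ip link set dev ... mtu" does the same over netlink). A minimal userspace sketch, with "eth0" as a placeholder interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_mtu = 3000;
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)	/* ends up in ndo_change_mtu */
		perror("SIOCSIFMTU");
	close(fd);
	return 0;
}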
6582
6583/**
6584 * ice_do_ioctl - Access the hwtstamp interface
6585 * @netdev: network interface device structure
6586 * @ifr: interface request data
6587 * @cmd: ioctl command
6588 */
6589static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6590{
6591	struct ice_netdev_priv *np = netdev_priv(netdev);
6592	struct ice_pf *pf = np->vsi->back;
6593
6594	switch (cmd) {
6595	case SIOCGHWTSTAMP:
6596		return ice_ptp_get_ts_config(pf, ifr);
6597	case SIOCSHWTSTAMP:
6598		return ice_ptp_set_ts_config(pf, ifr);
6599	default:
6600		return -EOPNOTSUPP;
6601	}
6602}
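Both hwtstamp commands carry a struct hwtstamp_config through ifr->ifr_data. A minimal userspace sketch that enables Tx and Rx hardware timestamping and so ends up in ice_ptp_set_ts_config(); "eth0" is a placeholder interface name and the program needs CAP_NET_ADMIN:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}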
6603
6604/**
6605 * ice_aq_str - convert AQ err code to a string
6606 * @aq_err: the AQ error code to convert
6607 */
6608const char *ice_aq_str(enum ice_aq_err aq_err)
6609{
6610	switch (aq_err) {
6611	case ICE_AQ_RC_OK:
6612		return "OK";
6613	case ICE_AQ_RC_EPERM:
6614		return "ICE_AQ_RC_EPERM";
6615	case ICE_AQ_RC_ENOENT:
6616		return "ICE_AQ_RC_ENOENT";
6617	case ICE_AQ_RC_ENOMEM:
6618		return "ICE_AQ_RC_ENOMEM";
6619	case ICE_AQ_RC_EBUSY:
6620		return "ICE_AQ_RC_EBUSY";
6621	case ICE_AQ_RC_EEXIST:
6622		return "ICE_AQ_RC_EEXIST";
6623	case ICE_AQ_RC_EINVAL:
6624		return "ICE_AQ_RC_EINVAL";
6625	case ICE_AQ_RC_ENOSPC:
6626		return "ICE_AQ_RC_ENOSPC";
6627	case ICE_AQ_RC_ENOSYS:
6628		return "ICE_AQ_RC_ENOSYS";
6629	case ICE_AQ_RC_EMODE:
6630		return "ICE_AQ_RC_EMODE";
6631	case ICE_AQ_RC_ENOSEC:
6632		return "ICE_AQ_RC_ENOSEC";
6633	case ICE_AQ_RC_EBADSIG:
6634		return "ICE_AQ_RC_EBADSIG";
6635	case ICE_AQ_RC_ESVN:
6636		return "ICE_AQ_RC_ESVN";
6637	case ICE_AQ_RC_EBADMAN:
6638		return "ICE_AQ_RC_EBADMAN";
6639	case ICE_AQ_RC_EBADBUF:
6640		return "ICE_AQ_RC_EBADBUF";
6641	}
6642
6643	return "ICE_AQ_RC_UNKNOWN";
6644}
6645
6646/**
6647 * ice_stat_str - convert status err code to a string
6648 * @stat_err: the status error code to convert
6649 */
6650const char *ice_stat_str(enum ice_status stat_err)
6651{
6652	switch (stat_err) {
6653	case ICE_SUCCESS:
6654		return "OK";
6655	case ICE_ERR_PARAM:
6656		return "ICE_ERR_PARAM";
6657	case ICE_ERR_NOT_IMPL:
6658		return "ICE_ERR_NOT_IMPL";
6659	case ICE_ERR_NOT_READY:
6660		return "ICE_ERR_NOT_READY";
6661	case ICE_ERR_NOT_SUPPORTED:
6662		return "ICE_ERR_NOT_SUPPORTED";
6663	case ICE_ERR_BAD_PTR:
6664		return "ICE_ERR_BAD_PTR";
6665	case ICE_ERR_INVAL_SIZE:
6666		return "ICE_ERR_INVAL_SIZE";
6667	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6668		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6669	case ICE_ERR_RESET_FAILED:
6670		return "ICE_ERR_RESET_FAILED";
6671	case ICE_ERR_FW_API_VER:
6672		return "ICE_ERR_FW_API_VER";
6673	case ICE_ERR_NO_MEMORY:
6674		return "ICE_ERR_NO_MEMORY";
6675	case ICE_ERR_CFG:
6676		return "ICE_ERR_CFG";
6677	case ICE_ERR_OUT_OF_RANGE:
6678		return "ICE_ERR_OUT_OF_RANGE";
6679	case ICE_ERR_ALREADY_EXISTS:
6680		return "ICE_ERR_ALREADY_EXISTS";
6681	case ICE_ERR_NVM:
6682		return "ICE_ERR_NVM";
6683	case ICE_ERR_NVM_CHECKSUM:
6684		return "ICE_ERR_NVM_CHECKSUM";
6685	case ICE_ERR_BUF_TOO_SHORT:
6686		return "ICE_ERR_BUF_TOO_SHORT";
6687	case ICE_ERR_NVM_BLANK_MODE:
6688		return "ICE_ERR_NVM_BLANK_MODE";
6689	case ICE_ERR_IN_USE:
6690		return "ICE_ERR_IN_USE";
6691	case ICE_ERR_MAX_LIMIT:
6692		return "ICE_ERR_MAX_LIMIT";
6693	case ICE_ERR_RESET_ONGOING:
6694		return "ICE_ERR_RESET_ONGOING";
6695	case ICE_ERR_HW_TABLE:
6696		return "ICE_ERR_HW_TABLE";
6697	case ICE_ERR_DOES_NOT_EXIST:
6698		return "ICE_ERR_DOES_NOT_EXIST";
6699	case ICE_ERR_FW_DDP_MISMATCH:
6700		return "ICE_ERR_FW_DDP_MISMATCH";
6701	case ICE_ERR_AQ_ERROR:
6702		return "ICE_ERR_AQ_ERROR";
6703	case ICE_ERR_AQ_TIMEOUT:
6704		return "ICE_ERR_AQ_TIMEOUT";
6705	case ICE_ERR_AQ_FULL:
6706		return "ICE_ERR_AQ_FULL";
6707	case ICE_ERR_AQ_NO_WORK:
6708		return "ICE_ERR_AQ_NO_WORK";
6709	case ICE_ERR_AQ_EMPTY:
6710		return "ICE_ERR_AQ_EMPTY";
6711	case ICE_ERR_AQ_FW_CRITICAL:
6712		return "ICE_ERR_AQ_FW_CRITICAL";
6713	}
6714
6715	return "ICE_ERR_UNKNOWN";
6716}
6717
6718/**
6719 * ice_set_rss_lut - Set RSS LUT
6720 * @vsi: Pointer to VSI structure
6721 * @lut: Lookup table
6722 * @lut_size: Lookup table size
6723 *
6724 * Returns 0 on success, negative on failure
6725 */
6726int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6727{
6728	struct ice_aq_get_set_rss_lut_params params = {};
6729	struct ice_hw *hw = &vsi->back->hw;
6730	enum ice_status status;
6731
6732	if (!lut)
6733		return -EINVAL;
6734
6735	params.vsi_handle = vsi->idx;
6736	params.lut_size = lut_size;
6737	params.lut_type = vsi->rss_lut_type;
6738	params.lut = lut;
6739
6740	status = ice_aq_set_rss_lut(hw, &params);
6741	if (status) {
6742		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6743			ice_stat_str(status),
6744			ice_aq_str(hw->adminq.sq_last_status));
6745		return -EIO;
6746	}
6747
6748	return 0;
6749}
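A usage sketch for the kernel side: an ethtool-style caller programming an even spread of flows across the VSI's active RSS queues. This is a minimal sketch, assuming the vsi->rss_table_size and vsi->rss_size fields of struct ice_vsi; the function name is illustrative, not a real driver helper.

static int ice_example_spread_rss(struct ice_vsi *vsi)
{
	int err;
	u16 i;
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* classic round-robin indirection: entry i maps to queue i % n */
	for (i = 0; i < vsi->rss_table_size; i++)
		lut[i] = i % vsi->rss_size;

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}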
6750
6751/**
6752 * ice_set_rss_key - Set RSS key
6753 * @vsi: Pointer to the VSI structure
6754 * @seed: RSS hash seed
6755 *
6756 * Returns 0 on success, negative on failure
6757 */
6758int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6759{
6760	struct ice_hw *hw = &vsi->back->hw;
6761	enum ice_status status;
6762
6763	if (!seed)
6764		return -EINVAL;
6765
6766	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6767	if (status) {
6768		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6769			ice_stat_str(status),
6770			ice_aq_str(hw->adminq.sq_last_status));
6771		return -EIO;
6772	}
6773
6774	return 0;
6775}
6776
6777/**
6778 * ice_get_rss_lut - Get RSS LUT
6779 * @vsi: Pointer to VSI structure
6780 * @lut: Buffer to store the lookup table entries
6781 * @lut_size: Size of buffer to store the lookup table entries
6782 *
6783 * Returns 0 on success, negative on failure
6784 */
6785int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6786{
6787	struct ice_aq_get_set_rss_lut_params params = {};
6788	struct ice_hw *hw = &vsi->back->hw;
6789	enum ice_status status;
6790
6791	if (!lut)
6792		return -EINVAL;
6793
6794	params.vsi_handle = vsi->idx;
6795	params.lut_size = lut_size;
6796	params.lut_type = vsi->rss_lut_type;
6797	params.lut = lut;
6798
6799	status = ice_aq_get_rss_lut(hw, &params);
6800	if (status) {
6801		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6802			ice_stat_str(status),
6803			ice_aq_str(hw->adminq.sq_last_status));
6804		return -EIO;
6805	}
6806
6807	return 0;
6808}
6809
6810/**
6811 * ice_get_rss_key - Get RSS key
6812 * @vsi: Pointer to VSI structure
6813 * @seed: Buffer to store the key in
6814 *
6815 * Returns 0 on success, negative on failure
6816 */
6817int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6818{
6819	struct ice_hw *hw = &vsi->back->hw;
6820	enum ice_status status;
6821
6822	if (!seed)
6823		return -EINVAL;
6824
6825	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6826	if (status) {
6827		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6828			ice_stat_str(status),
6829			ice_aq_str(hw->adminq.sq_last_status));
6830		return -EIO;
6831	}
6832
6833	return 0;
6834}
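A hypothetical kernel-side caller seeding the key from the kernel's boot-time random RSS key could be as small as the following sketch; ICE_VSIQF_HKEY_ARRAY_SIZE is assumed to be the key-size constant the driver uses elsewhere, and the function name is illustrative only.

static int ice_example_seed_rss_key(struct ice_vsi *vsi)
{
	u8 seed[ICE_VSIQF_HKEY_ARRAY_SIZE];

	/* fill with the kernel's global random RSS key */
	netdev_rss_key_fill(seed, sizeof(seed));
	return ice_set_rss_key(vsi, seed);
}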
6835
6836/**
6837 * ice_bridge_getlink - Get the hardware bridge mode
6838 * @skb: skb buff
6839 * @pid: process ID
6840 * @seq: RTNL message seq
6841 * @dev: the netdev being configured
6842 * @filter_mask: filter mask passed in
6843 * @nlflags: netlink flags passed in
6844 *
6845 * Return the bridge mode (VEB/VEPA)
6846 */
6847static int
6848ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6849		   struct net_device *dev, u32 filter_mask, int nlflags)
6850{
6851	struct ice_netdev_priv *np = netdev_priv(dev);
6852	struct ice_vsi *vsi = np->vsi;
6853	struct ice_pf *pf = vsi->back;
6854	u16 bmode;
6855
6856	bmode = pf->first_sw->bridge_mode;
6857
6858	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6859				       filter_mask, NULL);
6860}
6861
6862/**
6863 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6864 * @vsi: Pointer to VSI structure
6865 * @bmode: Hardware bridge mode (VEB/VEPA)
6866 *
6867 * Returns 0 on success, negative on failure
6868 */
6869static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6870{
6871	struct ice_aqc_vsi_props *vsi_props;
6872	struct ice_hw *hw = &vsi->back->hw;
6873	struct ice_vsi_ctx *ctxt;
6874	enum ice_status status;
6875	int ret = 0;
6876
6877	vsi_props = &vsi->info;
6878
6879	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6880	if (!ctxt)
6881		return -ENOMEM;
6882
6883	ctxt->info = vsi->info;
6884
6885	if (bmode == BRIDGE_MODE_VEB)
6886		/* change from VEPA to VEB mode */
6887		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6888	else
6889		/* change from VEB to VEPA mode */
6890		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6891	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6892
6893	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6894	if (status) {
6895		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6896			bmode, ice_stat_str(status),
6897			ice_aq_str(hw->adminq.sq_last_status));
6898		ret = -EIO;
6899		goto out;
6900	}
6901	/* Update sw flags for bookkeeping */
6902	vsi_props->sw_flags = ctxt->info.sw_flags;
6903
6904out:
6905	kfree(ctxt);
6906	return ret;
6907}
6908
6909/**
6910 * ice_bridge_setlink - Set the hardware bridge mode
6911 * @dev: the netdev being configured
6912 * @nlh: RTNL message
6913 * @flags: bridge setlink flags
6914 * @extack: netlink extended ack
6915 *
6916 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6917 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
6918 * not already set) for all VSIs connected to this switch, and also updates the
6919 * unicast switch filter rules for the corresponding switch of the netdev.
6920 */
6921static int
6922ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6923		   u16 __always_unused flags,
6924		   struct netlink_ext_ack __always_unused *extack)
6925{
6926	struct ice_netdev_priv *np = netdev_priv(dev);
6927	struct ice_pf *pf = np->vsi->back;
6928	struct nlattr *attr, *br_spec;
6929	struct ice_hw *hw = &pf->hw;
6930	enum ice_status status;
6931	struct ice_sw *pf_sw;
6932	int rem, v, err = 0;
6933
6934	pf_sw = pf->first_sw;
6935	/* find the attribute in the netlink message */
6936	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;
6937
6938	nla_for_each_nested(attr, br_spec, rem) {
6939		__u16 mode;
6940
6941		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6942			continue;
6943		mode = nla_get_u16(attr);
6944		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6945			return -EINVAL;
6946		/* Continue if the bridge mode is not being flipped */
6947		if (mode == pf_sw->bridge_mode)
6948			continue;
6949		/* Iterate through the PF VSI list and update the loopback
6950		 * mode of each VSI
6951		 */
6952		ice_for_each_vsi(pf, v) {
6953			if (!pf->vsi[v])
6954				continue;
6955			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6956			if (err)
6957				return err;
6958		}
6959
6960		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6961		/* Update the unicast switch filter rules for the corresponding
6962		 * switch of the netdev
6963		 */
6964		status = ice_update_sw_rule_bridge_mode(hw);
6965		if (status) {
6966			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6967				   mode, ice_stat_str(status),
6968				   ice_aq_str(hw->adminq.sq_last_status));
6969			/* revert hw->evb_veb */
6970			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6971			return -EIO;
6972		}
6973
6974		pf_sw->bridge_mode = mode;
6975	}
6976
6977	return 0;
6978}
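To see what this NDO parses, here is a hedged userspace sketch that flips the port to VEB mode by sending RTM_SETLINK with a nested IFLA_AF_SPEC carrying IFLA_BRIDGE_FLAGS (BRIDGE_FLAGS_SELF, so the request targets the port itself rather than a bridge master) and IFLA_BRIDGE_MODE. iproute2's "bridge link set dev <ifname> hwmode veb" builds the same message; "eth0" is a placeholder name and CAP_NET_ADMIN is required:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>

/* append a u16 rtattr at the current end of the message */
static void addattr16(struct nlmsghdr *nlh, __u16 type, __u16 val)
{
	struct rtattr *rta;

	rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(sizeof(val));
	memcpy(RTA_DATA(rta), &val, sizeof(val));
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	char buf[256] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct ifinfomsg *ifi;
	struct rtattr *af_spec;
	int fd;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*ifi));
	nlh->nlmsg_type = RTM_SETLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	ifi = NLMSG_DATA(nlh);
	ifi->ifi_family = PF_BRIDGE;
	ifi->ifi_index = if_nametoindex("eth0");	/* placeholder name */

	/* open the IFLA_AF_SPEC nest that ice_bridge_setlink() walks */
	af_spec = (struct rtattr *)(buf + NLMSG_ALIGN(nlh->nlmsg_len));
	af_spec->rta_type = IFLA_AF_SPEC;
	af_spec->rta_len = RTA_LENGTH(0);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + af_spec->rta_len;

	addattr16(nlh, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF);
	addattr16(nlh, IFLA_BRIDGE_MODE, BRIDGE_MODE_VEB);
	af_spec->rta_len = (char *)nlh + nlh->nlmsg_len - (char *)af_spec;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;
	if (sendto(fd, nlh, nlh->nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("RTM_SETLINK");
	close(fd);
	return 0;
}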
6979
6980/**
6981 * ice_tx_timeout - Respond to a Tx Hang
6982 * @netdev: network interface device structure
6983 * @txqueue: Tx queue
6984 */
6985static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6986{
6987	struct ice_netdev_priv *np = netdev_priv(netdev);
6988	struct ice_tx_ring *tx_ring = NULL;
6989	struct ice_vsi *vsi = np->vsi;
6990	struct ice_pf *pf = vsi->back;
6991	u32 i;
6992
6993	pf->tx_timeout_count++;
6994
6995	/* Check if PFC is enabled for the TC to which the queue belongs.
6996	 * If so, the Tx timeout is not caused by a hung queue and there is
6997	 * no need to reset and rebuild.
6998	 */
6999	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7000		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7001			 txqueue);
7002		return;
7003	}
7004
7005	/* now that we have an index, find the tx_ring struct */
7006	for (i = 0; i < vsi->num_txq; i++)
7007		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7008			if (txqueue == vsi->tx_rings[i]->q_index) {
7009				tx_ring = vsi->tx_rings[i];
7010				break;
7011			}
7012
7013	/* Reset recovery level if enough time has elapsed after last timeout.
7014	 * Also ensure no new reset action happens before next timeout period.
7015	 */
7016	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7017		pf->tx_timeout_recovery_level = 1;
7018	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7019				       netdev->watchdog_timeo)))
7020		return;
7021
7022	if (tx_ring) {
7023		struct ice_hw *hw = &pf->hw;
7024		u32 head, val = 0;
7025
7026		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7027			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7028		/* Read interrupt register */
7029		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7030
7031		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7032			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7033			    head, tx_ring->next_to_use, val);
7034	}
7035
7036	pf->tx_timeout_last_recovery = jiffies;
7037	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7038		    pf->tx_timeout_recovery_level, txqueue);
7039
7040	switch (pf->tx_timeout_recovery_level) {
7041	case 1:
7042		set_bit(ICE_PFR_REQ, pf->state);
7043		break;
7044	case 2:
7045		set_bit(ICE_CORER_REQ, pf->state);
7046		break;
7047	case 3:
7048		set_bit(ICE_GLOBR_REQ, pf->state);
7049		break;
7050	default:
7051		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7052		set_bit(ICE_DOWN, pf->state);
7053		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7054		set_bit(ICE_SERVICE_DIS, pf->state);
7055		break;
7056	}
7057
7058	ice_service_task_schedule(pf);
7059	pf->tx_timeout_recovery_level++;
7060}
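The escalation policy above has two time windows: a timeout arriving more than 20 seconds after the last recovery restarts at level 1 (PFR), a timeout inside the watchdog period is ignored so the pending reset can finish, and anything in between escalates to the next reset type (2 = CORER, 3 = GLOBR). A standalone restatement of that arithmetic with plain tick counters in place of jiffies; HZ and the sample values here are placeholders:

#include <stdio.h>

#define HZ 1000UL	/* placeholder tick rate: 1000 ticks per second */

static int next_action(unsigned long now, unsigned long last_recovery,
		       unsigned long watchdog_timeo, int level)
{
	if (now > last_recovery + HZ * 20)
		return 1;		/* quiet long enough: restart at PFR */
	if (now < last_recovery + watchdog_timeo)
		return 0;		/* too soon: let the pending reset finish */
	return level;			/* escalate to the next reset type */
}

int main(void)
{
	printf("%d\n", next_action(30000, 5000, 5000, 2));	/* 1: window expired */
	printf("%d\n", next_action(7000, 5000, 5000, 2));	/* 0: within watchdog */
	printf("%d\n", next_action(12000, 5000, 5000, 2));	/* 2: escalate to CORER */
	return 0;
}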
7061
7062/**
7063 * ice_open - Called when a network interface becomes active
7064 * @netdev: network interface device structure
7065 *
7066 * The open entry point is called when a network interface is made
7067 * active by the system (IFF_UP). At this point all resources needed
7068 * for transmit and receive operations are allocated, the interrupt
7069 * handler is registered with the OS, the netdev watchdog is enabled,
7070 * and the stack is notified that the interface is ready.
7071 *
7072 * Returns 0 on success, negative value on failure
7073 */
7074int ice_open(struct net_device *netdev)
7075{
7076	struct ice_netdev_priv *np = netdev_priv(netdev);
7077	struct ice_pf *pf = np->vsi->back;
7078
7079	if (ice_is_reset_in_progress(pf->state)) {
7080		netdev_err(netdev, "can't open net device while reset is in progress\n");
7081		return -EBUSY;
7082	}
7083
7084	return ice_open_internal(netdev);
7085}
7086
7087/**
7088 * ice_open_internal - Called when a network interface becomes active
7089 * @netdev: network interface device structure
7090 *
7091 * Internal ice_open implementation. Should not be called directly except
7092 * by ice_open and the reset handling routine.
7093 *
7094 * Returns 0 on success, negative value on failure
7095 */
7096int ice_open_internal(struct net_device *netdev)
7097{
7098	struct ice_netdev_priv *np = netdev_priv(netdev);
7099	struct ice_vsi *vsi = np->vsi;
7100	struct ice_pf *pf = vsi->back;
7101	struct ice_port_info *pi;
7102	enum ice_status status;
7103	int err;
7104
7105	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
7106		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
7107		return -EIO;
7108	}
7109
7110	netif_carrier_off(netdev);
7111
7112	pi = vsi->port_info;
7113	status = ice_update_link_info(pi);
7114	if (status) {
7115		netdev_err(netdev, "Failed to get link info, error %s\n",
7116			   ice_stat_str(status));
7117		return -EIO;
7118	}
7119
7120	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
7121
7122	/* Set the PHY if there is media; otherwise turn the PHY off */
7123	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
7124		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7125		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
7126			err = ice_init_phy_user_cfg(pi);
7127			if (err) {
7128				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
7129					   err);
7130				return err;
7131			}
7132		}
7133
7134		err = ice_configure_phy(vsi);
7135		if (err) {
7136			netdev_err(netdev, "Failed to set physical link up, error %d\n",
7137				   err);
7138			return err;
7139		}
7140	} else {
7141		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7142		ice_set_link(vsi, false);
7143	}
7144
7145	err = ice_vsi_open(vsi);
7146	if (err)
7147		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
7148			   vsi->vsi_num, vsi->vsw->sw_id);
7149
7150	/* Update existing tunnels information */
7151	udp_tunnel_get_rx_info(netdev);
7152
7153	return err;
7154}
7155
7156/**
7157 * ice_stop - Disables a network interface
7158 * @netdev: network interface device structure
7159 *
7160 * The stop entry point is called when an interface is de-activated by the OS,
7161 * and the netdevice enters the DOWN state. The hardware is still under the
7162 * driver's control, but the netdev interface is disabled.
7163 *
7164 * Returns success only - not allowed to fail
7165 */
7166int ice_stop(struct net_device *netdev)
7167{
7168	struct ice_netdev_priv *np = netdev_priv(netdev);
7169	struct ice_vsi *vsi = np->vsi;
7170	struct ice_pf *pf = vsi->back;
7171
7172	if (ice_is_reset_in_progress(pf->state)) {
7173		netdev_err(netdev, "can't stop net device while reset is in progress\n");
7174		return -EBUSY;
7175	}
7176
7177	ice_vsi_close(vsi);
7178
7179	return 0;
7180}
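Both entry points are driven by IFF_UP transitions. A minimal userspace sketch using the classic SIOCGIFFLAGS/SIOCSIFFLAGS ioctls ("ip link set eth0 up" is the usual front end); "eth0" is a placeholder name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
		goto out;
	ifr.ifr_flags |= IFF_UP;	/* set: kernel calls ndo_open */
	/* ifr.ifr_flags &= ~IFF_UP;	   clear: kernel calls ndo_stop */
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
		perror("SIOCSIFFLAGS");
out:
	close(fd);
	return 0;
}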
7181
7182/**
7183 * ice_features_check - Validate encapsulated packet conforms to limits
7184 * @skb: skb buffer
7185 * @netdev: This port's netdev
7186 * @features: Offload features that the stack believes apply
7187 */
7188static netdev_features_t
7189ice_features_check(struct sk_buff *skb,
7190		   struct net_device __always_unused *netdev,
7191		   netdev_features_t features)
7192{
7193	size_t len;
7194
7195	/* No point in doing any of this if neither checksum nor GSO are
7196	 * being requested for this frame. We can rule out both by just
7197	 * checking for CHECKSUM_PARTIAL
7198	 */
7199	if (skb->ip_summed != CHECKSUM_PARTIAL)
7200		return features;
7201
7202	/* We cannot support GSO if the MSS is less than 64 bytes. If it
7203	 * is, drop GSO support for this frame.
7204	 */
7205	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
7206		features &= ~NETIF_F_GSO_MASK;
7207
7208	len = skb_network_header(skb) - skb->data;
7209	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
7210		goto out_rm_features;
7211
7212	len = skb_transport_header(skb) - skb_network_header(skb);
7213	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7214		goto out_rm_features;
7215
7216	if (skb->encapsulation) {
7217		len = skb_inner_network_header(skb) - skb_transport_header(skb);
7218		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
7219			goto out_rm_features;
7220
7221		len = skb_inner_transport_header(skb) -
7222		      skb_inner_network_header(skb);
7223		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7224			goto out_rm_features;
7225	}
7226
7227	return features;
7228out_rm_features:
7229	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
7230}
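Every check above applies the same rule to a different header region: the length must be even and must fit the hardware's per-region maximum, otherwise the frame falls back to software checksum/GSO. A tiny restatement of that predicate; the bounds used in the example are placeholders, not the driver's ICE_TXD_*LEN_MAX values:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool hdr_region_ok(size_t len, size_t max)
{
	return len <= max && !(len & 0x1);	/* even and within bounds */
}

int main(void)
{
	printf("%d\n", hdr_region_ok(14, 127));	/* 1: typical 14-byte MAC header */
	printf("%d\n", hdr_region_ok(15, 127));	/* 0: odd length rejected */
	return 0;
}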
7231
7232static const struct net_device_ops ice_netdev_safe_mode_ops = {
7233	.ndo_open = ice_open,
7234	.ndo_stop = ice_stop,
7235	.ndo_start_xmit = ice_start_xmit,
7236	.ndo_set_mac_address = ice_set_mac_address,
7237	.ndo_validate_addr = eth_validate_addr,
7238	.ndo_change_mtu = ice_change_mtu,
7239	.ndo_get_stats64 = ice_get_stats64,
7240	.ndo_tx_timeout = ice_tx_timeout,
7241	.ndo_bpf = ice_xdp_safe_mode,
7242};
7243
7244static const struct net_device_ops ice_netdev_ops = {
7245	.ndo_open = ice_open,
7246	.ndo_stop = ice_stop,
7247	.ndo_start_xmit = ice_start_xmit,
7248	.ndo_features_check = ice_features_check,
7249	.ndo_set_rx_mode = ice_set_rx_mode,
7250	.ndo_set_mac_address = ice_set_mac_address,
7251	.ndo_validate_addr = eth_validate_addr,
7252	.ndo_change_mtu = ice_change_mtu,
7253	.ndo_get_stats64 = ice_get_stats64,
7254	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
7255	.ndo_do_ioctl = ice_do_ioctl,
7256	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
7257	.ndo_set_vf_mac = ice_set_vf_mac,
7258	.ndo_get_vf_config = ice_get_vf_cfg,
7259	.ndo_set_vf_trust = ice_set_vf_trust,
7260	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
7261	.ndo_set_vf_link_state = ice_set_vf_link_state,
7262	.ndo_get_vf_stats = ice_get_vf_stats,
7263	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
7264	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
7265	.ndo_set_features = ice_set_features,
7266	.ndo_bridge_getlink = ice_bridge_getlink,
7267	.ndo_bridge_setlink = ice_bridge_setlink,
7268	.ndo_fdb_add = ice_fdb_add,
7269	.ndo_fdb_del = ice_fdb_del,
7270#ifdef CONFIG_RFS_ACCEL
7271	.ndo_rx_flow_steer = ice_rx_flow_steer,
7272#endif
7273	.ndo_tx_timeout = ice_tx_timeout,
7274	.ndo_bpf = ice_xdp,
7275	.ndo_xdp_xmit = ice_xdp_xmit,
7276	.ndo_xsk_wakeup = ice_xsk_wakeup,
7277};