   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2017 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
  11#include <linux/bitops.h>
  12#include <linux/ctype.h>
  13#include <linux/stringify.h>
  14#include <linux/ethtool.h>
  15#include <linux/ethtool_netlink.h>
  16#include <linux/linkmode.h>
  17#include <linux/interrupt.h>
  18#include <linux/pci.h>
  19#include <linux/etherdevice.h>
  20#include <linux/crc32.h>
  21#include <linux/firmware.h>
  22#include <linux/utsname.h>
  23#include <linux/time.h>
  24#include <linux/ptp_clock_kernel.h>
  25#include <linux/net_tstamp.h>
  26#include <linux/timecounter.h>
  27#include <net/netlink.h>
  28#include "bnxt_hsi.h"
  29#include "bnxt.h"
  30#include "bnxt_hwrm.h"
  31#include "bnxt_ulp.h"
  32#include "bnxt_xdp.h"
  33#include "bnxt_ptp.h"
  34#include "bnxt_ethtool.h"
  35#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
  36#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
  37#include "bnxt_coredump.h"
  38
/* Report an NVM error both to userspace via netlink extack (when one was
 * supplied) and to the kernel log.  do/while (0) makes the macro behave
 * as a single statement in all contexts.
 */
#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)
  45
  46static u32 bnxt_get_msglevel(struct net_device *dev)
  47{
  48	struct bnxt *bp = netdev_priv(dev);
  49
  50	return bp->msg_enable;
  51}
  52
  53static void bnxt_set_msglevel(struct net_device *dev, u32 value)
  54{
  55	struct bnxt *bp = netdev_priv(dev);
  56
  57	bp->msg_enable = value;
  58}
  59
/* ethtool -c handler: report the current interrupt coalescing settings.
 *
 * Hardware buffer counts are stored pre-multiplied by bufs_per_record,
 * so they are divided back down when reported as frame counts.  A set
 * TIMER_RESET flag on a ring is surfaced as CQE coalescing mode.
 * Always returns 0.
 */
static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	/* Adaptive RX coalescing maps to the DIM flag. */
	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	/* RX side. */
	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_rx = true;

	/* TX side. */
	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_tx = true;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}
  97
/* ethtool -C handler: apply new interrupt coalescing settings.
 *
 * Frame counts from userspace are multiplied by bufs_per_record before
 * being stored, mirroring the division done in bnxt_get_coalesce().
 * Returns 0 on success or a negative errno.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			/* Leaving adaptive (DIM) mode: skip the manual
			 * parameter update below and just reprogram the
			 * rings with the existing settings.
			 */
			goto reset_coalesce;
		}
	}

	/* CQE (timer reset) mode needs firmware support. */
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	/* RX side. */
	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	/* TX side. */
	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		/* Stats interval changes require a full close/open cycle. */
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}
 177
/* Counter-name tables for ethtool -S.  Each table's order must stay in
 * sync with the sequential copy done in bnxt_get_ethtool_stats() and the
 * name emission in bnxt_get_strings().
 */

/* Per-ring RX hardware counters. */
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

/* Per-ring TX hardware counters. */
static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

/* TPA (aggregation) counters, legacy layout. */
static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

/* TPA counters, v2 layout (used when bp->max_tpa_v2 is set). */
static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

/* Per-ring RX counters maintained in software. */
static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

/* Per-completion-ring software counters. */
static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};
 225
/* Helpers that pair a stats-structure byte offset with its ethtool string
 * name, used to build the { offset, string } tables below.
 */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

/* Per-priority PFC counter pairs (duration + transitions), expanded for
 * priorities 0-7.
 */
#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

/* Per-CoS byte/packet counter pairs, expanded for CoS queues 0-7. */
#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

/* Per-CoS RX discard byte/packet counter pairs, CoS queues 0-7. */
#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

/* Per-priority entries reuse the cos0 offset as a base; the real CoS slot
 * is added at runtime from bp->pri2cos_idx[] (see bnxt_get_ethtool_stats).
 */
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)
 335
/* Symbolic indexes for total-discard counters.
 * NOTE(review): their use sites are outside this chunk — verify callers.
 */
enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

/* Names for the aggregated ring error counters; order must match the
 * field order of struct bnxt_total_ring_err_stats, which is walked
 * pointer-wise in bnxt_get_ethtool_stats().
 */
static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

/* Convenience counts for the per-ring counter tables above. */
#define NUM_RING_RX_SW_STATS		ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS		ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS		ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS		ARRAY_SIZE(bnxt_ring_tx_stats_str)
 358
/* Port-level MAC counters (ethtool -S): each entry pairs the counter's
 * offset into the firmware port-stats block with its display name.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};
 444
/* Extended RX port counters, offsets into the rx_port_stats_ext block.
 * Only the first fw_rx_stats_ext_size entries are valid at runtime.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

/* Extended TX port counters, offsets into the tx_port_stats_ext block. */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

/* Per-priority tables: base_off is the counter's cos0 slot; the actual
 * CoS index is added at runtime from bp->pri2cos_idx[].
 */
static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
/* Total number of per-priority counters across the four tables above. */
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
 509
 510static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
 511{
 512	if (BNXT_SUPPORTS_TPA(bp)) {
 513		if (bp->max_tpa_v2) {
 514			if (BNXT_CHIP_P5(bp))
 515				return BNXT_NUM_TPA_RING_STATS_P5;
 516			return BNXT_NUM_TPA_RING_STATS_P7;
 517		}
 518		return BNXT_NUM_TPA_RING_STATS;
 519	}
 520	return 0;
 521}
 522
 523static int bnxt_get_num_ring_stats(struct bnxt *bp)
 524{
 525	int rx, tx, cmn;
 526
 527	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
 528	     bnxt_get_num_tpa_ring_stats(bp);
 529	tx = NUM_RING_TX_HW_STATS;
 530	cmn = NUM_RING_CMN_SW_STATS;
 531	return rx * bp->rx_nr_rings +
 532	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
 533	       cmn * bp->cp_nr_rings;
 534}
 535
 536static int bnxt_get_num_stats(struct bnxt *bp)
 537{
 538	int num_stats = bnxt_get_num_ring_stats(bp);
 539	int len;
 540
 541	num_stats += BNXT_NUM_RING_ERR_STATS;
 542
 543	if (bp->flags & BNXT_FLAG_PORT_STATS)
 544		num_stats += BNXT_NUM_PORT_STATS;
 545
 546	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
 547		len = min_t(int, bp->fw_rx_stats_ext_size,
 548			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
 549		num_stats += len;
 550		len = min_t(int, bp->fw_tx_stats_ext_size,
 551			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
 552		num_stats += len;
 553		if (bp->pri2cos_valid)
 554			num_stats += BNXT_NUM_STATS_PRI;
 555	}
 556
 557	return num_stats;
 558}
 559
 560static int bnxt_get_sset_count(struct net_device *dev, int sset)
 561{
 562	struct bnxt *bp = netdev_priv(dev);
 563
 564	switch (sset) {
 565	case ETH_SS_STATS:
 566		return bnxt_get_num_stats(bp);
 567	case ETH_SS_TEST:
 568		if (!bp->num_tests)
 569			return -EOPNOTSUPP;
 570		return bp->num_tests;
 571	default:
 572		return -EOPNOTSUPP;
 573	}
 574}
 575
/* A ring index belongs to an RX ring iff it is below the RX ring count. */
static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}
 580
 581static bool is_tx_ring(struct bnxt *bp, int ring_num)
 582{
 583	int tx_base = 0;
 584
 585	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
 586		tx_base = bp->rx_nr_rings;
 587
 588	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
 589		return true;
 590	return false;
 591}
 592
/* ethtool -S handler: fill @buf with every counter value, in exactly the
 * order that bnxt_get_strings() emits the names.  Any change to either
 * function must be mirrored in the other.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	/* Device not open: leave the per-ring slots untouched but keep the
	 * buffer index aligned for the sections that follow.
	 */
	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		/* HW RX counters (first NUM_RING_RX_HW_STATS slots). */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		/* HW TX counters follow the RX block in sw_stats. */
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			       j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		/* TPA counters follow the TX block. */
		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		/* Software-maintained RX counters. */
		sw = (u64 *)&cpr->sw_stats.rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		/* Software counters common to every completion ring. */
		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	/* Aggregated ring error counters: current values plus the totals
	 * saved from before the last ring reconfiguration.  Walks struct
	 * bnxt_total_ring_err_stats field by field, so the struct layout
	 * must match bnxt_ring_err_stats_arr ordering.
	 */
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		/* Only the counters the firmware actually provides. */
		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		/* Per-priority counters: base_off is the cos0 slot, the
		 * priority-to-CoS map supplies the actual queue index.
		 */
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}
 704
/* ethtool get_strings handler: emit counter/test names, one per
 * ETH_GSTRING_LEN slot.  For ETH_SS_STATS the emission order must match
 * the value order produced by bnxt_get_ethtool_stats() exactly.
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		/* Per-ring counters, prefixed with "[<ring>]: ". */
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			/* v2-capable chips use the newer TPA name table. */
			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		/* Aggregated ring error counter names. */
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
			strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}

		/* Port-level counter names, when supported. */
		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			/* Capped by what the firmware actually reports. */
			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			/* Per-priority counter names (8 priorities each). */
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		/* Self-test names come from the firmware-provided table. */
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}
 821
 822static void bnxt_get_ringparam(struct net_device *dev,
 823			       struct ethtool_ringparam *ering,
 824			       struct kernel_ethtool_ringparam *kernel_ering,
 825			       struct netlink_ext_ack *extack)
 826{
 827	struct bnxt *bp = netdev_priv(dev);
 828
 829	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
 830		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
 831		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
 832		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
 833	} else {
 834		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
 835		ering->rx_jumbo_max_pending = 0;
 836		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
 837	}
 838	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
 839
 840	ering->rx_pending = bp->rx_ring_size;
 841	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
 842	ering->tx_pending = bp->tx_ring_size;
 843}
 844
 845static int bnxt_set_ringparam(struct net_device *dev,
 846			      struct ethtool_ringparam *ering,
 847			      struct kernel_ethtool_ringparam *kernel_ering,
 848			      struct netlink_ext_ack *extack)
 849{
 850	struct bnxt *bp = netdev_priv(dev);
 851
 852	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
 853	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
 854	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
 855		return -EINVAL;
 856
 857	if (netif_running(dev))
 858		bnxt_close_nic(bp, false, false);
 859
 860	bp->rx_ring_size = ering->rx_pending;
 861	bp->tx_ring_size = ering->tx_pending;
 862	bnxt_set_ring_params(bp);
 863
 864	if (netif_running(dev))
 865		return bnxt_open_nic(bp, false, false);
 866
 867	return 0;
 868}
 869
/* ethtool -l handler: report maximum and current channel counts. */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	/* Max combined channels: shared-ring limits, further bounded by
	 * TX scheduler inputs and divided across TCs (plus XDP if on).
	 */
	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = bp->num_tc;
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Max separate RX/TX channels (non-shared limits). */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		/* Nitro A0 reserves one ring internally. */
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}
 918
/* ethtool -L handler: reconfigure channel counts.  Combined mode and
 * separate RX/TX mode are mutually exclusive; XDP requires combined
 * mode.  The NIC is closed/reopened around the change when running.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;
	int tx_cp;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	/* Combined and separate RX/TX counts cannot be mixed. */
	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	/* Nitro A0 only supports combined channels. */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* One XDP TX ring per RX ring. */
		tx_xdp = req_rx_rings;
	}
	/* Verify the hardware can actually provide the requested rings. */
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	/* A user-configured RSS table may not survive a table-size change;
	 * require it to be reset to defaults first.
	 */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	/* Total TX rings: per-TC count times TCs, plus XDP rings. */
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		/* Device down: just reserve the resources for later. */
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}
1013
1014static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
1015				     int tbl_size, u32 *ids, u32 start,
1016				     u32 id_cnt)
1017{
1018	int i, j = start;
1019
1020	if (j >= id_cnt)
1021		return j;
1022	for (i = 0; i < tbl_size; i++) {
1023		struct hlist_head *head;
1024		struct bnxt_filter_base *fltr;
1025
1026		head = &tbl[i];
1027		hlist_for_each_entry_rcu(fltr, head, hash) {
1028			if (!fltr->flags ||
1029			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
1030				continue;
1031			ids[j++] = fltr->sw_id;
1032			if (j == id_cnt)
1033				return j;
1034		}
1035	}
1036	return j;
1037}
1038
1039static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
1040						      struct hlist_head tbl[],
1041						      int tbl_size, u32 id)
1042{
1043	int i;
1044
1045	for (i = 0; i < tbl_size; i++) {
1046		struct hlist_head *head;
1047		struct bnxt_filter_base *fltr;
1048
1049		head = &tbl[i];
1050		hlist_for_each_entry_rcu(fltr, head, hash) {
1051			if (fltr->flags && fltr->sw_id == id)
1052				return fltr;
1053		}
1054	}
1055	return NULL;
1056}
1057
/* ETHTOOL_GRXCLSRLALL: report all ntuple filter IDs into @rule_locs.
 * cmd->rule_cnt limits how many IDs are written and is updated to the
 * number actually copied; cmd->data reports the current filter count.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, 0, cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}
1070
/* ETHTOOL_GRXCLSRULE: translate the ntuple filter at fs->location back
 * into an ethtool_rx_flow_spec.  Only TCP/UDP over IPv4/IPv6 filters are
 * representable; anything else returns -EINVAL.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		/* Report only the fields the filter actually matches on;
		 * a full mask marks each reported field.
		 */
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
			fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
			fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
			fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
			fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
		}
	} else {
		/* Non-IPv4 filters are IPv6 by construction (see
		 * bnxt_add_ntuple_cls_rule).
		 */
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
				fkeys->addrs.v6addrs.src;
			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
				fkeys->addrs.v6addrs.dst;
			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
		}
		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
		}
	}

	fs->ring_cookie = fltr->base.rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
1154
/* All-ones masks meaning "match the entire field" in ethtool flow specs. */
#define IPV4_ALL_MASK		((__force __be32)~0)
#define L4_PORT_ALL_MASK	((__force __be16)~0)
1157
1158static bool ipv6_mask_is_full(__be32 mask[4])
1159{
1160	return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
1161}
1162
1163static bool ipv6_mask_is_zero(__be32 mask[4])
1164{
1165	return !(mask[0] | mask[1] | mask[2] | mask[3]);
1166}
1167
/* Add a user-defined ntuple classification rule from an ethtool flow spec.
 * Only TCP/UDP over IPv4/IPv6 with fully-masked (or unspecified) fields is
 * supported.  On success, writes the assigned filter ID to fs->location and
 * returns 0; on failure the partially-built filter and the l2 filter
 * reference taken here are released.
 */
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rx_flow_spec *fs)
{
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	struct bnxt_l2_filter *l2_fltr;
	u32 flow_type = fs->flow_type;
	struct flow_keys *fkeys;
	u32 idx;
	int rc;

	if (!bp->vnic_info)
		return -EAGAIN;

	/* Extended matches and VF redirection are not supported. */
	if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
		return -EOPNOTSUPP;

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	/* Anchor the ntuple filter to the default l2 filter; hold a ref
	 * that is dropped on any error below.
	 */
	l2_fltr = bp->vnic_info[0].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);

		/* Each field must be either fully masked or fully wild;
		 * partial masks are rejected.
		 */
		if (ip_mask->ip4src == IPV4_ALL_MASK) {
			fkeys->addrs.v4addrs.src = ip_spec->ip4src;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
		} else if (ip_mask->ip4src) {
			goto ntuple_err;
		}
		if (ip_mask->ip4dst == IPV4_ALL_MASK) {
			fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
		} else if (ip_mask->ip4dst) {
			goto ntuple_err;
		}

		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
			fkeys->ports.src = ip_spec->psrc;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
		} else if (ip_mask->psrc) {
			goto ntuple_err;
		}
		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
			fkeys->ports.dst = ip_spec->pdst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
		} else if (ip_mask->pdst) {
			goto ntuple_err;
		}
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		if (ipv6_mask_is_full(ip_mask->ip6src)) {
			fkeys->addrs.v6addrs.src =
				*(struct in6_addr *)&ip_spec->ip6src;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
		} else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
			goto ntuple_err;
		}
		if (ipv6_mask_is_full(ip_mask->ip6dst)) {
			fkeys->addrs.v6addrs.dst =
				*(struct in6_addr *)&ip_spec->ip6dst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
		} else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
			goto ntuple_err;
		}

		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
			fkeys->ports.src = ip_spec->psrc;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
		} else if (ip_mask->psrc) {
			goto ntuple_err;
		}
		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
			fkeys->ports.dst = ip_spec->pdst;
			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
		} else if (ip_mask->pdst) {
			goto ntuple_err;
		}
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	/* A rule that matches nothing is meaningless. */
	if (!new_fltr->ntuple_flags)
		goto ntuple_err;

	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	new_fltr->base.rxq = ring;
	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			/* FW rejected the filter; undo the SW insert (this
			 * also releases the l2 filter reference).
			 */
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}
1309
1310static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1311{
1312	struct ethtool_rx_flow_spec *fs = &cmd->fs;
1313	u32 ring, flow_type;
1314	int rc;
1315	u8 vf;
1316
1317	if (!netif_running(bp->dev))
1318		return -EAGAIN;
1319	if (!(bp->flags & BNXT_FLAG_RFS))
1320		return -EPERM;
1321	if (fs->location != RX_CLS_LOC_ANY)
1322		return -EINVAL;
1323
1324	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
1325	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1326	if (BNXT_VF(bp) && vf)
1327		return -EINVAL;
1328	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
1329		return -EINVAL;
1330	if (!vf && ring >= bp->rx_nr_rings)
1331		return -EINVAL;
1332
1333	flow_type = fs->flow_type;
1334	if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
1335		return -EINVAL;
1336	flow_type &= ~FLOW_EXT;
1337	if (flow_type == ETHER_FLOW)
1338		rc = -EOPNOTSUPP;
1339	else
1340		rc = bnxt_add_ntuple_cls_rule(bp, fs);
1341	return rc;
1342}
1343
1344static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1345{
1346	struct ethtool_rx_flow_spec *fs = &cmd->fs;
1347	struct bnxt_filter_base *fltr_base;
1348	struct bnxt_ntuple_filter *fltr;
1349
1350	rcu_read_lock();
1351	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
1352					  BNXT_NTP_FLTR_HASH_SIZE,
1353					  fs->location);
1354	if (!fltr_base) {
1355		rcu_read_unlock();
1356		return -ENOENT;
1357	}
1358
1359	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
1360	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
1361		rcu_read_unlock();
1362		return -EINVAL;
1363	}
1364	rcu_read_unlock();
1365	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
1366	bnxt_del_ntp_filter(bp, fltr);
1367	return 0;
1368}
1369
1370static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1371{
1372	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1373		return RXH_IP_SRC | RXH_IP_DST;
1374	return 0;
1375}
1376
1377static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1378{
1379	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1380		return RXH_IP_SRC | RXH_IP_DST;
1381	return 0;
1382}
1383
/* ETHTOOL_GRXFH: report which header fields feed the RSS hash for the
 * given flow type.  TCP/UDP may add the 4-tuple L4 port bits; the UDP
 * cases deliberately fall through to pick up the 2-tuple IP bits.
 */
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}
1428
/* The only RXH field combinations the hardware can hash on. */
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1431
/* ETHTOOL_SRXFH: set which header fields feed the RSS hash for a flow
 * type.  Only exact 4-tuple, 2-tuple or no hashing is accepted.  A
 * changed configuration restarts the NIC (if running) to apply it.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	/* 4-tuple hashing is only configurable for TCP/UDP, and for UDP
	 * only when the hardware supports UDP RSS.
	 */
	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	/* 2-tuple (IP-only) hashing applies per address family. */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
1510
1511static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1512			  u32 *rule_locs)
1513{
1514	struct bnxt *bp = netdev_priv(dev);
1515	int rc = 0;
1516
1517	switch (cmd->cmd) {
1518	case ETHTOOL_GRXRINGS:
1519		cmd->data = bp->rx_nr_rings;
1520		break;
1521
1522	case ETHTOOL_GRXCLSRLCNT:
1523		cmd->rule_cnt = bp->ntp_fltr_count;
1524		cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
1525		break;
1526
1527	case ETHTOOL_GRXCLSRLALL:
1528		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1529		break;
1530
1531	case ETHTOOL_GRXCLSRULE:
1532		rc = bnxt_grxclsrule(bp, cmd);
1533		break;
1534
1535	case ETHTOOL_GRXFH:
1536		rc = bnxt_grxfh(bp, cmd);
1537		break;
1538
1539	default:
1540		rc = -EOPNOTSUPP;
1541		break;
1542	}
1543
1544	return rc;
1545}
1546
1547static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1548{
1549	struct bnxt *bp = netdev_priv(dev);
1550	int rc;
1551
1552	switch (cmd->cmd) {
1553	case ETHTOOL_SRXFH:
1554		rc = bnxt_srxfh(bp, cmd);
1555		break;
1556
1557	case ETHTOOL_SRXCLSRLINS:
1558		rc = bnxt_srxclsrlins(bp, cmd);
1559		break;
1560
1561	case ETHTOOL_SRXCLSRLDEL:
1562		rc = bnxt_srxclsrldel(bp, cmd);
1563		break;
1564
1565	default:
1566		rc = -EOPNOTSUPP;
1567		break;
1568	}
1569	return rc;
1570}
1571
1572u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1573{
1574	struct bnxt *bp = netdev_priv(dev);
1575
1576	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1577		return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
1578		       BNXT_RSS_TABLE_ENTRIES_P5;
1579	return HW_HASH_INDEX_SIZE;
1580}
1581
/* Return the RSS hash key size in bytes (fixed for this hardware). */
static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}
1586
1587static int bnxt_get_rxfh(struct net_device *dev,
1588			 struct ethtool_rxfh_param *rxfh)
1589{
1590	struct bnxt *bp = netdev_priv(dev);
1591	struct bnxt_vnic_info *vnic;
1592	u32 i, tbl_size;
1593
1594	rxfh->hfunc = ETH_RSS_HASH_TOP;
1595
1596	if (!bp->vnic_info)
1597		return 0;
1598
1599	vnic = &bp->vnic_info[0];
1600	if (rxfh->indir && bp->rss_indir_tbl) {
1601		tbl_size = bnxt_get_rxfh_indir_size(dev);
1602		for (i = 0; i < tbl_size; i++)
1603			rxfh->indir[i] = bp->rss_indir_tbl[i];
1604	}
1605
1606	if (rxfh->key && vnic->rss_hash_key)
1607		memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1608
1609	return 0;
1610}
1611
1612static int bnxt_set_rxfh(struct net_device *dev,
1613			 struct ethtool_rxfh_param *rxfh,
1614			 struct netlink_ext_ack *extack)
1615{
1616	struct bnxt *bp = netdev_priv(dev);
1617	int rc = 0;
1618
1619	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
1620		return -EOPNOTSUPP;
1621
1622	if (rxfh->key)
1623		return -EOPNOTSUPP;
1624
1625	if (rxfh->indir) {
1626		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1627
1628		for (i = 0; i < tbl_size; i++)
1629			bp->rss_indir_tbl[i] = rxfh->indir[i];
1630		pad = bp->rss_indir_tbl_entries - tbl_size;
1631		if (pad)
1632			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1633	}
1634
1635	if (netif_running(bp->dev)) {
1636		bnxt_close_nic(bp, false, false);
1637		rc = bnxt_open_nic(bp, false, false);
1638	}
1639	return rc;
1640}
1641
1642static void bnxt_get_drvinfo(struct net_device *dev,
1643			     struct ethtool_drvinfo *info)
1644{
1645	struct bnxt *bp = netdev_priv(dev);
1646
1647	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1648	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1649	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1650	info->n_stats = bnxt_get_num_stats(bp);
1651	info->testinfo_len = bp->num_tests;
1652	/* TODO CHIMP_FW: eeprom dump details */
1653	info->eedump_len = 0;
1654	/* TODO CHIMP FW: reg dump details */
1655	info->regdump_len = 0;
1656}
1657
1658static int bnxt_get_regs_len(struct net_device *dev)
1659{
1660	struct bnxt *bp = netdev_priv(dev);
1661	int reg_len;
1662
1663	if (!BNXT_PF(bp))
1664		return -EOPNOTSUPP;
1665
1666	reg_len = BNXT_PXP_REG_LEN;
1667
1668	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1669		reg_len += sizeof(struct pcie_ctx_hw_stats);
1670
1671	return reg_len;
1672}
1673
/* ethtool -d: dump PXP registers into @_p and, when supported, append
 * PCIe statistics fetched from firmware via HWRM_PCIE_QSTATS.
 * regs->version is 1 when the PCIe stats section is present, else 0.
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct pcie_ctx_hw_stats *hw_pcie_stats;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	regs->version = 0;
	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	/* DMA buffer the firmware will fill with the stats. */
	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats) {
		hwrm_req_drop(bp, req);
		return;
	}

	regs->version = 1;
	hwrm_req_hold(bp, req); /* hold on to slice */
	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		/* Byte-swap the little-endian stats into host order. */
		__le64 *src = (__le64 *)hw_pcie_stats;
		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
		int i;

		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
			dst[i] = le64_to_cpu(src[i]);
	}
	hwrm_req_drop(bp, req);
}
1714
1715static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1716{
1717	struct bnxt *bp = netdev_priv(dev);
1718
1719	wol->supported = 0;
1720	wol->wolopts = 0;
1721	memset(&wol->sopass, 0, sizeof(wol->sopass));
1722	if (bp->flags & BNXT_FLAG_WOL_CAP) {
1723		wol->supported = WAKE_MAGIC;
1724		if (bp->wol)
1725			wol->wolopts = WAKE_MAGIC;
1726	}
1727}
1728
1729static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1730{
1731	struct bnxt *bp = netdev_priv(dev);
1732
1733	if (wol->wolopts & ~WAKE_MAGIC)
1734		return -EINVAL;
1735
1736	if (wol->wolopts & WAKE_MAGIC) {
1737		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1738			return -EINVAL;
1739		if (!bp->wol) {
1740			if (bnxt_hwrm_alloc_wol_fltr(bp))
1741				return -EBUSY;
1742			bp->wol = 1;
1743		}
1744	} else {
1745		if (bp->wol) {
1746			if (bnxt_hwrm_free_wol_fltr(bp))
1747				return -EBUSY;
1748			bp->wol = 0;
1749		}
1750	}
1751	return 0;
1752}
1753
1754u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1755{
1756	u32 speed_mask = 0;
1757
1758	/* TODO: support 25GB, 40GB, 50GB with different cable type */
1759	/* set the advertised speeds */
1760	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1761		speed_mask |= ADVERTISED_100baseT_Full;
1762	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1763		speed_mask |= ADVERTISED_1000baseT_Full;
1764	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1765		speed_mask |= ADVERTISED_2500baseX_Full;
1766	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1767		speed_mask |= ADVERTISED_10000baseT_Full;
1768	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1769		speed_mask |= ADVERTISED_40000baseCR4_Full;
1770
1771	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1772		speed_mask |= ADVERTISED_Pause;
1773	else if (fw_pause & BNXT_LINK_PAUSE_TX)
1774		speed_mask |= ADVERTISED_Asym_Pause;
1775	else if (fw_pause & BNXT_LINK_PAUSE_RX)
1776		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1777
1778	return speed_mask;
1779}
1780
/* Physical media classes used to index bnxt_link_modes[]. */
enum bnxt_media_type {
	BNXT_MEDIA_UNKNOWN = 0,
	BNXT_MEDIA_TP,		/* twisted pair (BaseT) */
	BNXT_MEDIA_CR,		/* copper DAC */
	BNXT_MEDIA_SR,		/* short-range optics */
	BNXT_MEDIA_LR_ER_FR,	/* long/extended/far-reach optics */
	BNXT_MEDIA_KR,		/* backplane */
	BNXT_MEDIA_KX,		/* backplane (KX) */
	BNXT_MEDIA_X,		/* BaseX */
	__BNXT_MEDIA_END,
};
1792
/* Map firmware PHY type (PORT_PHY_QCFG_RESP_PHY_TYPE_*) to a media class.
 * Unlisted PHY types default to BNXT_MEDIA_UNKNOWN (zero).
 */
static const enum bnxt_media_type bnxt_phy_types[] = {
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] =  BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};
1849
1850static enum bnxt_media_type
1851bnxt_get_media(struct bnxt_link_info *link_info)
1852{
1853	switch (link_info->media_type) {
1854	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
1855		return BNXT_MEDIA_TP;
1856	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
1857		return BNXT_MEDIA_CR;
1858	default:
1859		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
1860			return bnxt_phy_types[link_info->phy_type];
1861		return BNXT_MEDIA_UNKNOWN;
1862	}
1863}
1864
/* Compact speed indices used to index bnxt_link_modes[]; multiple FW
 * speed values (NRZ/PAM4 variants) map to one index.
 */
enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};
1878
/* Map a firmware link speed value to its compact table index; NRZ and
 * PAM4 variants of the same speed share one index.
 */
static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
{
	switch (speed) {
	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return BNXT_LINK_SPEED_50GB_IDX;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return BNXT_LINK_SPEED_100GB_IDX;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return BNXT_LINK_SPEED_200GB_IDX;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return BNXT_LINK_SPEED_400GB_IDX;
	default: return BNXT_LINK_SPEED_UNKNOWN;
	}
}
1905
1906static const enum ethtool_link_mode_bit_indices
1907bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
1908	[BNXT_LINK_SPEED_100MB_IDX] = {
1909		{
1910			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1911		},
1912	},
1913	[BNXT_LINK_SPEED_1GB_IDX] = {
1914		{
1915			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1916			/* historically baseT, but DAC is more correctly baseX */
1917			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1918			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1919			[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1920			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1921		},
1922	},
1923	[BNXT_LINK_SPEED_10GB_IDX] = {
1924		{
1925			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1926			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1927			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1928			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1929			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1930			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1931		},
1932	},
1933	[BNXT_LINK_SPEED_25GB_IDX] = {
1934		{
1935			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1936			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1937			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1938		},
1939	},
1940	[BNXT_LINK_SPEED_40GB_IDX] = {
1941		{
1942			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1943			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1944			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1945			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1946		},
1947	},
1948	[BNXT_LINK_SPEED_50GB_IDX] = {
1949		[BNXT_SIG_MODE_NRZ] = {
1950			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1951			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1952			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1953		},
1954		[BNXT_SIG_MODE_PAM4] = {
1955			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
1956			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
1957			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1958			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
1959		},
1960	},
1961	[BNXT_LINK_SPEED_100GB_IDX] = {
1962		[BNXT_SIG_MODE_NRZ] = {
1963			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1964			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1965			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1966			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1967		},
1968		[BNXT_SIG_MODE_PAM4] = {
1969			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
1970			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
1971			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
1972			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
1973		},
1974		[BNXT_SIG_MODE_PAM4_112] = {
1975			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
1976			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
1977			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
1978			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
1979		},
1980	},
1981	[BNXT_LINK_SPEED_200GB_IDX] = {
1982		[BNXT_SIG_MODE_PAM4] = {
1983			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1984			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1985			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1986			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1987		},
1988		[BNXT_SIG_MODE_PAM4_112] = {
1989			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
1990			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
1991			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
1992			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
1993		},
1994	},
1995	[BNXT_LINK_SPEED_400GB_IDX] = {
1996		[BNXT_SIG_MODE_PAM4] = {
1997			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
1998			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
1999			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
2000			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
2001		},
2002		[BNXT_SIG_MODE_PAM4_112] = {
2003			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
2004			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
2005			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
2006			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
2007		},
2008	},
2009};
2010
2011#define BNXT_LINK_MODE_UNKNOWN -1
2012
2013static enum ethtool_link_mode_bit_indices
2014bnxt_get_link_mode(struct bnxt_link_info *link_info)
2015{
2016	enum ethtool_link_mode_bit_indices link_mode;
2017	enum bnxt_link_speed_indices speed;
2018	enum bnxt_media_type media;
2019	u8 sig_mode;
2020
2021	if (link_info->phy_link_status != BNXT_LINK_LINK)
2022		return BNXT_LINK_MODE_UNKNOWN;
2023
2024	media = bnxt_get_media(link_info);
2025	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
2026		speed = bnxt_fw_speed_idx(link_info->link_speed);
2027		sig_mode = link_info->active_fec_sig_mode &
2028			PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
2029	} else {
2030		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
2031		sig_mode = link_info->req_signal_mode;
2032	}
2033	if (sig_mode >= BNXT_SIG_MODE_MAX)
2034		return BNXT_LINK_MODE_UNKNOWN;
2035
2036	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
2037	 * link mode, but since no such devices exist, the zeroes in the
2038	 * map can be conveniently used to represent unknown link modes.
2039	 */
2040	link_mode = bnxt_link_modes[speed][sig_mode][media];
2041	if (!link_mode)
2042		return BNXT_LINK_MODE_UNKNOWN;
2043
2044	switch (link_mode) {
2045	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
2046		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2047			link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
2048		break;
2049	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
2050		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2051			link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
2052		break;
2053	default:
2054		break;
2055	}
2056
2057	return link_mode;
2058}
2059
2060static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
2061				   struct ethtool_link_ksettings *lk_ksettings)
2062{
2063	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2064
2065	if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
2066		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2067				 lk_ksettings->link_modes.supported);
2068		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2069				 lk_ksettings->link_modes.supported);
2070	}
2071
2072	if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
2073	    link_info->support_pam4_auto_speeds)
2074		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2075				 lk_ksettings->link_modes.supported);
2076
2077	if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2078		return;
2079
2080	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
2081		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2082				 lk_ksettings->link_modes.advertising);
2083	if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
2084		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2085				 lk_ksettings->link_modes.advertising);
2086	if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
2087		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2088				 lk_ksettings->link_modes.lp_advertising);
2089	if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
2090		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2091				 lk_ksettings->link_modes.lp_advertising);
2092}
2093
/* Firmware NRZ speed mask per bnxt_link_speed_indices entry, for devices
 * without the SPEEDS2 capability.
 */
static const u16 bnxt_nrz_speed_masks[] = {
	[BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2104
/* Firmware PAM4 speed mask per bnxt_link_speed_indices entry, for devices
 * without the SPEEDS2 capability.
 */
static const u16 bnxt_pam4_speed_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2111
/* Firmware NRZ speed mask per bnxt_link_speed_indices entry, for devices
 * with the SPEEDS2 capability (combined speeds2 firmware mask).
 */
static const u16 bnxt_nrz_speeds2_masks[] = {
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};
2121
/* Firmware PAM4 speed mask per bnxt_link_speed_indices entry, for devices
 * with the SPEEDS2 capability.
 */
static const u16 bnxt_pam4_speeds2_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
};
2128
/* Firmware PAM4-112 speed mask per bnxt_link_speed_indices entry; these
 * encodings only exist on devices with the SPEEDS2 capability.
 */
static const u16 bnxt_pam4_112_speeds2_masks[] = {
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};
2134
2135static enum bnxt_link_speed_indices
2136bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
2137{
2138	const u16 *speeds;
2139	int idx, len;
2140
2141	switch (sig_mode) {
2142	case BNXT_SIG_MODE_NRZ:
2143		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2144			speeds = bnxt_nrz_speeds2_masks;
2145			len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
2146		} else {
2147			speeds = bnxt_nrz_speed_masks;
2148			len = ARRAY_SIZE(bnxt_nrz_speed_masks);
2149		}
2150		break;
2151	case BNXT_SIG_MODE_PAM4:
2152		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2153			speeds = bnxt_pam4_speeds2_masks;
2154			len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
2155		} else {
2156			speeds = bnxt_pam4_speed_masks;
2157			len = ARRAY_SIZE(bnxt_pam4_speed_masks);
2158		}
2159		break;
2160	case BNXT_SIG_MODE_PAM4_112:
2161		speeds = bnxt_pam4_112_speeds2_masks;
2162		len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
2163		break;
2164	default:
2165		return BNXT_LINK_SPEED_UNKNOWN;
2166	}
2167
2168	for (idx = 0; idx < len; idx++) {
2169		if (speeds[idx] == speed_msk)
2170			return idx;
2171	}
2172
2173	return BNXT_LINK_SPEED_UNKNOWN;
2174}
2175
2176#define BNXT_FW_SPEED_MSK_BITS 16
2177
2178static void
2179__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2180			  u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2181{
2182	enum ethtool_link_mode_bit_indices link_mode;
2183	enum bnxt_link_speed_indices speed;
2184	u8 bit;
2185
2186	for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
2187		speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
2188		if (!speed)
2189			continue;
2190
2191		link_mode = bnxt_link_modes[speed][sig_mode][media];
2192		if (!link_mode)
2193			continue;
2194
2195		linkmode_set_bit(link_mode, et_mask);
2196	}
2197}
2198
2199static void
2200bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2201			u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2202{
2203	if (media) {
2204		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2205					  et_mask);
2206		return;
2207	}
2208
2209	/* list speeds for all media if unknown */
2210	for (media = 1; media < __BNXT_MEDIA_END; media++)
2211		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2212					  et_mask);
2213}
2214
2215static void
2216bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
2217				    enum bnxt_media_type media,
2218				    struct ethtool_link_ksettings *lk_ksettings)
2219{
2220	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2221	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2222	u16 phy_flags = bp->phy_flags;
2223
2224	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2225		sp_nrz = link_info->support_speeds2;
2226		sp_pam4 = link_info->support_speeds2;
2227		sp_pam4_112 = link_info->support_speeds2;
2228	} else {
2229		sp_nrz = link_info->support_speeds;
2230		sp_pam4 = link_info->support_pam4_speeds;
2231	}
2232	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2233				lk_ksettings->link_modes.supported);
2234	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2235				lk_ksettings->link_modes.supported);
2236	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2237				phy_flags, lk_ksettings->link_modes.supported);
2238}
2239
2240static void
2241bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
2242				enum bnxt_media_type media,
2243				struct ethtool_link_ksettings *lk_ksettings)
2244{
2245	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2246	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2247	u16 phy_flags = bp->phy_flags;
2248
2249	sp_nrz = link_info->advertising;
2250	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2251		sp_pam4 = link_info->advertising;
2252		sp_pam4_112 = link_info->advertising;
2253	} else {
2254		sp_pam4 = link_info->advertising_pam4;
2255	}
2256	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2257				lk_ksettings->link_modes.advertising);
2258	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2259				lk_ksettings->link_modes.advertising);
2260	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2261				phy_flags, lk_ksettings->link_modes.advertising);
2262}
2263
2264static void
2265bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
2266			       enum bnxt_media_type media,
2267			       struct ethtool_link_ksettings *lk_ksettings)
2268{
2269	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2270	u16 phy_flags = bp->phy_flags;
2271
2272	bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
2273				BNXT_SIG_MODE_NRZ, phy_flags,
2274				lk_ksettings->link_modes.lp_advertising);
2275	bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
2276				BNXT_SIG_MODE_PAM4, phy_flags,
2277				lk_ksettings->link_modes.lp_advertising);
2278}
2279
2280static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
2281			      u16 speed_msk, const unsigned long *et_mask,
2282			      enum ethtool_link_mode_bit_indices mode)
2283{
2284	bool mode_desired = linkmode_test_bit(mode, et_mask);
2285
2286	if (!mode)
2287		return;
2288
2289	/* enabled speeds for installed media should override */
2290	if (installed_media && mode_desired) {
2291		*speeds |= speed_msk;
2292		*delta |= speed_msk;
2293		return;
2294	}
2295
2296	/* many to one mapping, only allow one change per fw_speed bit */
2297	if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
2298		*speeds ^= speed_msk;
2299		*delta |= speed_msk;
2300	}
2301}
2302
/* Translate the user's ethtool advertising bitmap in @et_mask into the
 * firmware advertising masks stored in @link_info (NRZ, PAM4 and PAM4-112
 * variants).  Modes for the currently installed media take precedence;
 * because many ethtool modes map to one firmware speed bit, the delta_*
 * accumulators limit each firmware bit to a single toggle per call.
 */
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
				    const unsigned long *et_mask)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
	enum bnxt_media_type media = bnxt_get_media(link_info);
	u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
	u32 delta_pam4_112 = 0;
	u32 delta_pam4 = 0;
	u32 delta_nrz = 0;
	int i, m;

	adv = &link_info->advertising;
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		/* SPEEDS2 devices keep all signal modes in one mask */
		adv_pam4 = &link_info->advertising;
		adv_pam4_112 = &link_info->advertising;
		sp_msks = bnxt_nrz_speeds2_masks;
		sp_pam4_msks = bnxt_pam4_speeds2_masks;
		sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
	} else {
		/* PAM4-112 encodings only exist on SPEEDS2 devices, so
		 * adv_pam4_112 stays NULL here.
		 */
		adv_pam4 = &link_info->advertising_pam4;
		sp_msks = bnxt_nrz_speed_masks;
		sp_pam4_msks = bnxt_pam4_speed_masks;
	}
	for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
		/* accept any legal media from user */
		for (m = 1; m < __BNXT_MEDIA_END; m++) {
			bnxt_update_speed(&delta_nrz, m == media,
					  adv, sp_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
			bnxt_update_speed(&delta_pam4, m == media,
					  adv_pam4, sp_pam4_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
			if (!adv_pam4_112)
				continue;

			bnxt_update_speed(&delta_pam4_112, m == media,
					  adv_pam4_112, sp_pam4_112_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
		}
	}
}
2345
2346static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
2347				struct ethtool_link_ksettings *lk_ksettings)
2348{
2349	u16 fec_cfg = link_info->fec_cfg;
2350
2351	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
2352		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2353				 lk_ksettings->link_modes.advertising);
2354		return;
2355	}
2356	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2357		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2358				 lk_ksettings->link_modes.advertising);
2359	if (fec_cfg & BNXT_FEC_ENC_RS)
2360		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2361				 lk_ksettings->link_modes.advertising);
2362	if (fec_cfg & BNXT_FEC_ENC_LLRS)
2363		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2364				 lk_ksettings->link_modes.advertising);
2365}
2366
2367static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
2368				struct ethtool_link_ksettings *lk_ksettings)
2369{
2370	u16 fec_cfg = link_info->fec_cfg;
2371
2372	if (fec_cfg & BNXT_FEC_NONE) {
2373		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2374				 lk_ksettings->link_modes.supported);
2375		return;
2376	}
2377	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
2378		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2379				 lk_ksettings->link_modes.supported);
2380	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
2381		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2382				 lk_ksettings->link_modes.supported);
2383	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
2384		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2385				 lk_ksettings->link_modes.supported);
2386}
2387
2388u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
2389{
2390	switch (fw_link_speed) {
2391	case BNXT_LINK_SPEED_100MB:
2392		return SPEED_100;
2393	case BNXT_LINK_SPEED_1GB:
2394		return SPEED_1000;
2395	case BNXT_LINK_SPEED_2_5GB:
2396		return SPEED_2500;
2397	case BNXT_LINK_SPEED_10GB:
2398		return SPEED_10000;
2399	case BNXT_LINK_SPEED_20GB:
2400		return SPEED_20000;
2401	case BNXT_LINK_SPEED_25GB:
2402		return SPEED_25000;
2403	case BNXT_LINK_SPEED_40GB:
2404		return SPEED_40000;
2405	case BNXT_LINK_SPEED_50GB:
2406	case BNXT_LINK_SPEED_50GB_PAM4:
2407		return SPEED_50000;
2408	case BNXT_LINK_SPEED_100GB:
2409	case BNXT_LINK_SPEED_100GB_PAM4:
2410	case BNXT_LINK_SPEED_100GB_PAM4_112:
2411		return SPEED_100000;
2412	case BNXT_LINK_SPEED_200GB:
2413	case BNXT_LINK_SPEED_200GB_PAM4:
2414	case BNXT_LINK_SPEED_200GB_PAM4_112:
2415		return SPEED_200000;
2416	case BNXT_LINK_SPEED_400GB:
2417	case BNXT_LINK_SPEED_400GB_PAM4:
2418	case BNXT_LINK_SPEED_400GB_PAM4_112:
2419		return SPEED_400000;
2420	default:
2421		return SPEED_UNKNOWN;
2422	}
2423}
2424
2425static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
2426				    struct bnxt_link_info *link_info)
2427{
2428	struct ethtool_link_settings *base = &lk_ksettings->base;
2429
2430	if (link_info->link_state == BNXT_LINK_STATE_UP) {
2431		base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
2432		base->duplex = DUPLEX_HALF;
2433		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2434			base->duplex = DUPLEX_FULL;
2435		lk_ksettings->lanes = link_info->active_lanes;
2436	} else if (!link_info->autoneg) {
2437		base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
2438		base->duplex = DUPLEX_HALF;
2439		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
2440			base->duplex = DUPLEX_FULL;
2441	}
2442}
2443
/* ethtool_ops::get_link_ksettings handler.  Builds the full ksettings
 * view (supported/advertising/lp_advertising bitmaps, speed, duplex,
 * port type) from the cached link state, under bp->link_lock.
 * Always returns 0.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;
	enum ethtool_link_mode_bit_indices link_mode;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	enum bnxt_media_type media;

	/* start from a clean slate before filling in bits below */
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	base->duplex = DUPLEX_UNKNOWN;
	base->speed = SPEED_UNKNOWN;
	link_info = &bp->link_info;

	mutex_lock(&bp->link_lock);
	bnxt_get_ethtool_modes(link_info, lk_ksettings);
	media = bnxt_get_media(link_info);
	bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
	link_mode = bnxt_get_link_mode(link_info);
	if (link_mode != BNXT_LINK_MODE_UNKNOWN)
		ethtool_params_from_link_mode(lk_ksettings, link_mode);
	else
		bnxt_get_default_speeds(lk_ksettings, link_info);

	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.advertising);
		base->autoneg = AUTONEG_ENABLE;
		bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
		/* link-partner data is only valid while the link is up */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_get_all_ethtool_lp_speeds(link_info, media,
						       lk_ksettings);
	} else {
		base->autoneg = AUTONEG_DISABLE;
	}

	/* map firmware media type to the ethtool port type */
	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.advertising);
	} else {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.advertising);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}
2507
/* Select the firmware forced-speed code (and signalling mode) matching
 * the user's requested ethtool speed and optional lane count, then store
 * the request in bp->link_info with autoneg disabled.
 *
 * For speeds with multiple encodings, NRZ is preferred, then PAM4, then
 * PAM4-112; a non-zero @lanes value steers the choice by disqualifying
 * encodings whose lane count cannot match.
 *
 * Returns 0 on success, -EINVAL for an unsupported speed or lane count,
 * and -EALREADY when the same forced setting is already in place.
 */
static int
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u32 lanes_needed = 1;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
			lanes_needed = 2;
		}
		break;
	case SPEED_25000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
			lanes_needed = 4;
		}
		break;
	case SPEED_50000:
		/* 50G: NRZ needs 2 lanes; a 1-lane request forces PAM4 */
		if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
		    lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
			lanes_needed = 2;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
			fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
		}
		break;
	case SPEED_100000:
		/* 100G: prefer NRZ x4, then PAM4 x2, then PAM4-112 x1 */
		if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
		     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
		    lanes != 2 && lanes != 1) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
			lanes_needed = 4;
		} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
			   lanes != 1) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 2;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
		}
		break;
	case SPEED_200000:
		/* 200G: prefer PAM4 x4, then PAM4-112 x2 */
		if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
			fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
			   lanes != 2) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 4;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 2;
		}
		break;
	case SPEED_400000:
		/* 400G: prefer PAM4 x8, then PAM4-112 x4 */
		if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
		    lanes != 4) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
			sig_mode = BNXT_SIG_MODE_PAM4;
			lanes_needed = 8;
		} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
			fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
			sig_mode = BNXT_SIG_MODE_PAM4_112;
			lanes_needed = 4;
		}
		break;
	}

	if (!fw_speed) {
		netdev_err(dev, "unsupported speed!\n");
		return -EINVAL;
	}

	/* lanes == 0 means "any"; otherwise it must match exactly */
	if (lanes && lanes != lanes_needed) {
		netdev_err(dev, "unsupported number of lanes for speed\n");
		return -EINVAL;
	}

	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
		return -EALREADY;

	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;

	return 0;
}
2645
2646u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
2647{
2648	u16 fw_speed_mask = 0;
2649
2650	/* only support autoneg at speed 100, 1000, and 10000 */
2651	if (advertising & (ADVERTISED_100baseT_Full |
2652			   ADVERTISED_100baseT_Half)) {
2653		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
2654	}
2655	if (advertising & (ADVERTISED_1000baseT_Full |
2656			   ADVERTISED_1000baseT_Half)) {
2657		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
2658	}
2659	if (advertising & ADVERTISED_10000baseT_Full)
2660		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
2661
2662	if (advertising & ADVERTISED_40000baseCR4_Full)
2663		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
2664
2665	return fw_speed_mask;
2666}
2667
/* ethtool_ops::set_link_ksettings handler.  Validates the request,
 * updates the cached link configuration under bp->link_lock, and pushes
 * it to firmware if the interface is running.
 *
 * Returns 0 on success or a negative errno (-EOPNOTSUPP when the PHY
 * cannot be configured, -EINVAL for invalid combinations).
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* nothing requested: advertise everything supported */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		/* forced speed is not allowed on twisted-pair PHYs */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		if (rc) {
			/* -EALREADY: same setting already in effect, not an
			 * error from the user's point of view
			 */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2728
2729static int bnxt_get_fecparam(struct net_device *dev,
2730			     struct ethtool_fecparam *fec)
2731{
2732	struct bnxt *bp = netdev_priv(dev);
2733	struct bnxt_link_info *link_info;
2734	u8 active_fec;
2735	u16 fec_cfg;
2736
2737	link_info = &bp->link_info;
2738	fec_cfg = link_info->fec_cfg;
2739	active_fec = link_info->active_fec_sig_mode &
2740		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
2741	if (fec_cfg & BNXT_FEC_NONE) {
2742		fec->fec = ETHTOOL_FEC_NONE;
2743		fec->active_fec = ETHTOOL_FEC_NONE;
2744		return 0;
2745	}
2746	if (fec_cfg & BNXT_FEC_AUTONEG)
2747		fec->fec |= ETHTOOL_FEC_AUTO;
2748	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2749		fec->fec |= ETHTOOL_FEC_BASER;
2750	if (fec_cfg & BNXT_FEC_ENC_RS)
2751		fec->fec |= ETHTOOL_FEC_RS;
2752	if (fec_cfg & BNXT_FEC_ENC_LLRS)
2753		fec->fec |= ETHTOOL_FEC_LLRS;
2754
2755	switch (active_fec) {
2756	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
2757		fec->active_fec |= ETHTOOL_FEC_BASER;
2758		break;
2759	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
2760	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
2761	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
2762		fec->active_fec |= ETHTOOL_FEC_RS;
2763		break;
2764	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
2765	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
2766		fec->active_fec |= ETHTOOL_FEC_LLRS;
2767		break;
2768	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
2769		fec->active_fec |= ETHTOOL_FEC_OFF;
2770		break;
2771	}
2772	return 0;
2773}
2774
2775static void bnxt_get_fec_stats(struct net_device *dev,
2776			       struct ethtool_fec_stats *fec_stats)
2777{
2778	struct bnxt *bp = netdev_priv(dev);
2779	u64 *rx;
2780
2781	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
2782		return;
2783
2784	rx = bp->rx_port_stats_ext.sw_stats;
2785	fec_stats->corrected_bits.total =
2786		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
2787
2788	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
2789		return;
2790
2791	fec_stats->corrected_blocks.total =
2792		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
2793	fec_stats->uncorrectable_blocks.total =
2794		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
2795}
2796
2797static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
2798					 u32 fec)
2799{
2800	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
2801
2802	if (fec & ETHTOOL_FEC_BASER)
2803		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
2804	else if (fec & ETHTOOL_FEC_RS)
2805		fw_fec |= BNXT_FEC_RS_ON(link_info);
2806	else if (fec & ETHTOOL_FEC_LLRS)
2807		fw_fec |= BNXT_FEC_LLRS_ON;
2808	return fw_fec;
2809}
2810
/* ethtool_ops::set_fecparam handler.  Validates the requested FEC modes
 * against hardware capabilities, sends an HWRM PORT_PHY_CFG with a PHY
 * reset, and refreshes the cached link state on success.
 *
 * Returns 0 on success, -EOPNOTSUPP when FEC is unsupported, -EINVAL
 * for unsupported mode combinations, or an HWRM error code.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	/* FEC off: disable autoneg of FEC and every encoding */
	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	/* reject any requested mode the hardware cannot do */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg only makes sense with link autoneg on */
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}
2859
2860static void bnxt_get_pauseparam(struct net_device *dev,
2861				struct ethtool_pauseparam *epause)
2862{
2863	struct bnxt *bp = netdev_priv(dev);
2864	struct bnxt_link_info *link_info = &bp->link_info;
2865
2866	if (BNXT_VF(bp))
2867		return;
2868	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2869	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2870	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
2871}
2872
/* ethtool pause statistics callback: report RX/TX pause frame counters
 * from the software mirror of the port statistics.  Unavailable on VFs
 * or when port statistics collection is not enabled.
 */
static void bnxt_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *epstat)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	/* TX counters start at a fixed byte offset after the RX block;
	 * divide by 8 because we index an array of u64 counters.
	 */
	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
}
2888
/* ethtool -A handler.  Updates the requested flow control settings under
 * link_lock and pushes them to firmware if the interface is running.
 * Pause autoneg requires speed autoneg to be enabled first.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		/* Cannot autoneg pause without autonegotiating speed */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	/* Requested RX/TX pause bits apply in both autoneg and forced mode */
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
2930
/* ethtool get_link callback: report the driver's cached link state */
static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return BNXT_LINK_IS_UP(bp);
}
2938
/* Query NVM device information from firmware and copy the entire HWRM
 * response into @nvm_dev_info on success.  VFs have no NVM access, so
 * the call is rejected with -EOPNOTSUPP for them.
 */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	/* Hold the request so the response buffer stays valid for memcpy */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}
2960
/* Log the common explanation for an -EACCES result from a flash/reset
 * HWRM command: the function lacks admin privileges.
 */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
2965
2966int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2967			 u16 ext, u16 *index, u32 *item_length,
2968			 u32 *data_length);
2969
/* Write an NVM directory entry via HWRM_NVM_WRITE.
 * @data/@data_len: payload to flash, copied into a DMA slice for
 *	firmware; may be NULL/0 to only create or resize an entry of
 *	@dir_item_len bytes.
 * Uses the maximum HWRM timeout because NVM erase/write is slow.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		/* DMA slice is owned by the request; freed by hwrm_req_send/drop */
		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
3011
/* Send an HWRM_FW_RESET request for the given embedded processor.
 * @proc_type: which processor to reset (boot, mgmt, chip, AP, ...).
 * @self_reset: when/how the processor self-resets.
 * @flags: e.g. graceful-reset request.
 * AP resets are sent silently because failure there is expected on
 * some configurations and handled by the caller.
 */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	/* A remote driver (e.g. on the other PCI function) can inhibit resets */
	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}
3041
/* Reset the embedded processor that runs the firmware image of the
 * given NVM directory type, after that image has been flashed.  The
 * self-reset policy depends on which processor is involved.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
{
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/*       (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
}
3078
3079static int bnxt_firmware_reset_chip(struct net_device *dev)
3080{
3081	struct bnxt *bp = netdev_priv(dev);
3082	u8 flags = 0;
3083
3084	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
3085		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
3086
3087	return bnxt_hwrm_firmware_reset(dev,
3088					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
3089					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
3090					flags);
3091}
3092
/* Reset the application processor only; no self-reset, no extra flags */
static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}
3099
/* Validate and flash an APE-binary-format firmware image, then reset
 * the corresponding embedded processor.  Validation checks the header
 * signature, code type, device family and trailing CRC32 before any
 * NVM write is attempted.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int	rc = 0;
	u16	code_type;
	u32	stored_crc;
	u32	calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Map the NVM directory type to the code type the header must carry */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	/* Image must at least contain the fixed header */
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}
3179
/* Validate and flash a microcode image that carries a bnxt_ucode_trailer
 * at the end of the file.  The trailer signature, directory type, length
 * and the file's trailing CRC32 are verified before flashing; unlike
 * APE-format firmware, no processor reset follows.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	/* Trailer lives at the very end of the image */
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}
3230
3231static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
3232{
3233	switch (dir_type) {
3234	case BNX_DIR_TYPE_CHIMP_PATCH:
3235	case BNX_DIR_TYPE_BOOTCODE:
3236	case BNX_DIR_TYPE_BOOTCODE_2:
3237	case BNX_DIR_TYPE_APE_FW:
3238	case BNX_DIR_TYPE_APE_PATCH:
3239	case BNX_DIR_TYPE_KONG_FW:
3240	case BNX_DIR_TYPE_KONG_PATCH:
3241	case BNX_DIR_TYPE_BONO_FW:
3242	case BNX_DIR_TYPE_BONO_PATCH:
3243		return true;
3244	}
3245
3246	return false;
3247}
3248
3249static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
3250{
3251	switch (dir_type) {
3252	case BNX_DIR_TYPE_AVS:
3253	case BNX_DIR_TYPE_EXP_ROM_MBA:
3254	case BNX_DIR_TYPE_PCIE:
3255	case BNX_DIR_TYPE_TSCF_UCODE:
3256	case BNX_DIR_TYPE_EXT_PHY:
3257	case BNX_DIR_TYPE_CCM:
3258	case BNX_DIR_TYPE_ISCSI_BOOT:
3259	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3260	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3261		return true;
3262	}
3263
3264	return false;
3265}
3266
3267static bool bnxt_dir_type_is_executable(u16 dir_type)
3268{
3269	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3270		bnxt_dir_type_is_other_exec_format(dir_type);
3271}
3272
/* Load a firmware file by name and flash it into the given NVM
 * directory type, dispatching to the appropriate validator/flasher
 * based on the image format implied by the directory type.
 */
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware  *fw;
	int			rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	/* APE-binary and microcode images are validated before flashing;
	 * everything else is written raw.
	 */
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}
3296
/* Human-readable error strings reported via extack/netdev_err by
 * nvm_update_err_to_stderr() and the package flashing paths.
 */
#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
3307
/* Map a firmware NVM_INSTALL_UPDATE result code to a standard errno,
 * logging a human-readable explanation via extack (when supplied) and
 * the kernel log.  Always returns a negative errno.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}
3350
/* Preferred DMA buffer size and batch-mode flags for package flashing */
#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
3354
/* Ensure the UPDATE directory entry in NVM is large enough for a
 * package of @fw_size bytes, resizing it (rounded up to 4 KiB) when it
 * is too small.  Errors are reported via extack/netdev_err.
 */
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
{
	u32 item_len;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
				  &item_len, NULL);
	if (rc) {
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		return rc;
	}

	if (fw_size > item_len) {
		/* NULL data with dir_item_len set only resizes the entry */
		rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
				      BNX_DIR_ORDINAL_FIRST, 0, 1,
				      round_up(fw_size, 4096), NULL, 0);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
			return rc;
		}
	}
	return 0;
}
3380
/* Flash a firmware package into the NVM UPDATE area and ask firmware to
 * install it.  The image is streamed in DMA-buffer-sized chunks via
 * HWRM_NVM_MODIFY (batch mode when it spans multiple chunks), then
 * HWRM_NVM_INSTALL_UPDATE is issued.  On a fragmentation error the
 * install is retried with defragmentation allowed, and if firmware
 * clears the NVM area the UPDATE entry is recreated and the whole flash
 * is attempted one more time.
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;
		else
			break;
	}
	if (!kmem) {
		hwrm_req_drop(bp, modify);
		return -ENOMEM;
	}

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
	if (rc) {
		hwrm_req_drop(bp, modify);
		return rc;
	}

	/* NVM writes are slow; use the maximum timeout on both requests */
	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	/* Install type may be packed in either half of the u32 */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
			break;
		}
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
			rc = -EFBIG;
			break;
		}

		modify->dir_idx = cpu_to_le16(index);

		/* Multi-chunk transfers use batch mode: MORE on the first
		 * chunks and LAST on the final one.
		 */
		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify->flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);
			if (rc)
				goto pkg_abort;
			copied += len;
		}

		rc = hwrm_req_send_silent(bp, install);
		if (!rc)
			break;

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration. Return with the result for INSTALL_UPDATE
			 */
			break;
		}

		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		switch (cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
			rc = -EALREADY;
			break;
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			/* Retry the install allowing firmware to defragment */
			install->flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = hwrm_req_send_silent(bp, install);
			if (!rc)
				break;

			cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

			if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install->flags = 0;
				rc = bnxt_flash_nvram(bp->dev,
						      BNX_DIR_TYPE_UPDATE,
						      BNX_DIR_ORDINAL_FIRST,
						      0, 0, item_len, NULL, 0);
				if (!rc)
					break;
			}
			fallthrough;
		default:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
		}
	} while (defrag_attempted && !rc);

pkg_abort:
	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);

	/* Translate a firmware-reported install result into an errno */
	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
3540
3541static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
3542					u32 install_type, struct netlink_ext_ack *extack)
3543{
3544	const struct firmware *fw;
3545	int rc;
3546
3547	rc = request_firmware(&fw, filename, &dev->dev);
3548	if (rc != 0) {
3549		netdev_err(dev, "PKG error %d requesting file: %s\n",
3550			   rc, filename);
3551		return rc;
3552	}
3553
3554	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
3555
3556	release_firmware(fw);
3557
3558	return rc;
3559}
3560
/* ethtool -f handler.  ETHTOOL_FLASH_ALL_REGIONS (or a region value
 * above 16 bits) selects whole-package install; any other region is
 * treated as a single NVM directory type.  Only supported on PFs.
 */
static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region, NULL);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
3576
/* Query the NVM directory layout: number of entries and the size of
 * each entry, written to @entries and @length on success.
 */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
	if (rc)
		return rc;

	/* Hold the request so the response buffer stays valid */
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}
3597
/* ethtool get_eeprom_len: VFs have no NVM, everyone else gets the full
 * 32-bit offset space (see comment below).
 */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
3610
/* Dump the NVM directory into @data.  The first two bytes are the entry
 * count and per-entry size (each truncated to 8 bits); the rest is the
 * raw directory table DMA'd from firmware, padded with 0xff.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	if (!dir_entries || !entry_length)
		return -EIO;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	/* mul_u32_u32 avoids 32-bit overflow when sizing the DMA buffer */
	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);
	return rc;
}
3657
/* Read @length bytes starting at @offset from the NVM item at directory
 * @index into @data, via an HWRM_NVM_READ DMA transfer.
 */
int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input *req;

	if (!length)
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);
	return rc;
}
3692
/* Look up an NVM directory entry by type/ordinal/ext.  On success the
 * optional out-parameters @index, @item_length and @data_length are
 * filled in (each may be NULL when the caller does not need it).
 * Sent silently because a missing entry is a normal outcome.
 */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length)
{
	struct hwrm_nvm_find_dir_entry_output *output;
	struct hwrm_nvm_find_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
	if (rc)
		return rc;

	req->enables = 0;
	req->dir_idx = 0;
	req->dir_type = cpu_to_le16(type);
	req->dir_ordinal = cpu_to_le16(ordinal);
	req->dir_ext = cpu_to_le16(ext);
	/* Match the exact ordinal, not "greater than or equal" */
	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}
3725
/* Scan the tab/newline-delimited package log in @data and return a
 * pointer to field number @desired_field (0-based) of the last line.
 * The buffer is tokenized in place: tabs and newlines are overwritten
 * with NULs so the returned pointer is a terminated C string.  Returns
 * NULL when the field is absent or @datalen is 0.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char	*retval = NULL;
	char	*p;
	char	*value;
	int	field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		/* Start of a new line: reset the field counter and result
		 * so only the final line's field is returned.
		 */
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			/* Advance to the end of this tab-delimited field */
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}
3758
/* Read the package log from NVM and extract the package version string
 * into @ver (at most @size bytes).  Returns -ENOENT when no version
 * field starting with a digit is found.
 */
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				  &index, NULL, &pkglen);
	if (rc)
		return rc;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return -ENOMEM;
	}

	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
	if (rc)
		goto err;

	/* Version strings are expected to begin with a digit */
	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		strscpy(ver, pkgver, size);
	else
		rc = -ENOENT;

err:
	kfree(pkgbuf);

	return rc;
}
3797
/* Append "/pkg <version>" to the firmware version string when the NVM
 * package version can be read.  Best-effort: failures are ignored.
 */
static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	char buf[FW_VER_STR_LEN];
	int len;

	if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
			 "/pkg %s", buf);
	}
}
3810
3811static int bnxt_get_eeprom(struct net_device *dev,
3812			   struct ethtool_eeprom *eeprom,
3813			   u8 *data)
3814{
3815	u32 index;
3816	u32 offset;
3817
3818	if (eeprom->offset == 0) /* special offset value to get directory */
3819		return bnxt_get_nvram_directory(dev, eeprom->len, data);
3820
3821	index = eeprom->offset >> 24;
3822	offset = eeprom->offset & 0xffffff;
3823
3824	if (index == 0) {
3825		netdev_err(dev, "unsupported index value: %d\n", index);
3826		return -EINVAL;
3827	}
3828
3829	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
3830}
3831
/* Erase the NVM directory entry at @index via HWRM_NVM_ERASE_DIR_ENTRY */
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct hwrm_nvm_erase_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
	if (rc)
		return rc;

	req->dir_idx = cpu_to_le16(index);
	return hwrm_req_send(bp, req);
}
3845
/* ethtool -E handler.  The magic/offset fields carry an ad-hoc protocol:
 * magic upper 16 bits == 0xffff selects directory operations (low byte
 * is the 1-based index, next byte the opcode; only 0x0e = erase is
 * supported, with offset == ~magic as a safety check).  Any other magic
 * writes an NVM item: type from magic>>16, ext from magic, ordinal and
 * attr from the offset halves.  Executable types cannot be written here.
 */
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
				eeprom->len);
}
3886
/* ethtool --set-eee handler.  Validates the request against EEE
 * capability, autoneg state, LPI timer limits and the advertised speed
 * set, updates the cached EEE config under link_lock, and pushes the
 * new link settings to firmware when the interface is running.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	if (!edata->eee_enabled)
		goto eee_ok;

	/* EEE can only be negotiated, never forced */
	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		rc = -EINVAL;
		goto eee_exit;
	}
	if (edata->tx_lpi_enabled) {
		/* Firmware-imposed LPI timer limits; if none, keep the
		 * previously configured timer value.
		 */
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			rc = -EINVAL;
			goto eee_exit;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	/* EEE advertisement must be a subset of autoneg advertised speeds */
	if (!edata->advertised) {
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		rc = -EINVAL;
		goto eee_exit;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}
3944
/* ethtool --show-eee handler: report the cached EEE state.  Advertised
 * and link-partner masks are cleared when EEE is disabled/inactive so
 * userspace does not see stale values.
 */
static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	*edata = bp->eee;
	if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
		edata->advertised = 0;
		edata->tx_lpi_enabled = 0;
	}

	if (!bp->eee.eee_active)
		edata->lp_advertised = 0;

	return 0;
}
3966
/* Read @data_length bytes from a transceiver module EEPROM through the
 * firmware PORT_PHY_I2C_READ command, splitting the transfer into
 * chunks of at most BNXT_MAX_PHY_I2C_RESP_SIZE bytes.
 *
 * @i2c_addr: I2C slave address to read from.
 * @page_number: EEPROM page to select.
 * @bank: bank number; only conveyed to firmware when non-zero.
 * @start_addr: byte offset within the page.
 * @buf: destination; must hold @data_length bytes.
 *
 * Returns 0 on success or a negative errno from the HWRM layer.
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u8 bank,
					    u16 start_addr, u16 data_length,
					    u8 *buf)
{
	struct hwrm_port_phy_i2c_read_output *output;
	struct hwrm_port_phy_i2c_read_input *req;
	int rc, byte_offset = 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
	if (rc)
		return rc;

	/* hold so the request can be re-sent for each chunk */
	output = hwrm_req_hold(bp, req);
	req->i2c_slave_addr = i2c_addr;
	req->page_number = cpu_to_le16(page_number);
	req->port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req->page_offset = cpu_to_le16(start_addr + byte_offset);
		req->data_length = xfer_size;
		/* page_offset and bank_number are optional fields; flag
		 * only the ones carrying a non-zero value
		 */
		req->enables =
			cpu_to_le32((start_addr + byte_offset ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
				     0) |
				    (bank ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
				     0));
		rc = hwrm_req_send(bp, req);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);
	hwrm_req_drop(bp, req);

	return rc;
}
4007
/* ethtool -m support: identify the plugged transceiver type and its
 * EEPROM size by reading the start of the A0h page.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		/* byte 0 is the SFF module identifier */
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			/* without the diagnostic page only the smaller
			 * 8436-sized EEPROM is readable
			 */
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}
4057
/* ethtool -m dump: read the requested window of module EEPROM.  The
 * flat offset space maps the first ETH_MODULE_SFF_8436_LEN bytes to
 * I2C address A0h and the remainder to A2h.
 */
static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16  start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		/* clamp so the A0 read does not cross into the A2 range */
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		/* rebase offset into the A2 address space */
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
						      start, length, data);
	}
	return rc;
}
4089
4090static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
4091{
4092	if (bp->link_info.module_status <=
4093	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4094		return 0;
4095
4096	switch (bp->link_info.module_status) {
4097	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
4098		NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
4099		break;
4100	case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
4101		NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
4102		break;
4103	case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
4104		NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
4105		break;
4106	default:
4107		NL_SET_ERR_MSG_MOD(extack, "Unknown error");
4108		break;
4109	}
4110	return -EINVAL;
4111}
4112
4113static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
4114					  const struct ethtool_module_eeprom *page_data,
4115					  struct netlink_ext_ack *extack)
4116{
4117	struct bnxt *bp = netdev_priv(dev);
4118	int rc;
4119
4120	rc = bnxt_get_module_status(bp, extack);
4121	if (rc)
4122		return rc;
4123
4124	if (bp->hwrm_spec_code < 0x10202) {
4125		NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
4126		return -EINVAL;
4127	}
4128
4129	if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
4130		NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
4131		return -EINVAL;
4132	}
4133
4134	rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
4135					      page_data->page, page_data->bank,
4136					      page_data->offset,
4137					      page_data->length,
4138					      page_data->data);
4139	if (rc) {
4140		NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
4141		return rc;
4142	}
4143	return page_data->length;
4144}
4145
4146static int bnxt_nway_reset(struct net_device *dev)
4147{
4148	int rc = 0;
4149
4150	struct bnxt *bp = netdev_priv(dev);
4151	struct bnxt_link_info *link_info = &bp->link_info;
4152
4153	if (!BNXT_PHY_CFG_ABLE(bp))
4154		return -EOPNOTSUPP;
4155
4156	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
4157		return -EINVAL;
4158
4159	if (netif_running(dev))
4160		rc = bnxt_hwrm_set_link_setting(bp, true, false);
4161
4162	return rc;
4163}
4164
/* ethtool -p (port identify) handler: blink all port LEDs while the
 * identify operation is active, and restore the default state when it
 * ends, via HWRM_PORT_LED_CFG.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		/* alternate blink with 500ms on/off while identifying */
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(pf->port_id);
	req->num_leds = bp->num_leds;
	/* req->led0_id starts an array of per-LED config blocks laid
	 * out like struct bnxt_led_cfg
	 */
	led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req->enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	return hwrm_req_send(bp, req);
}
4205
/* Ask firmware to generate a test interrupt on @cmpl_ring.
 * Returns 0 on success or a negative errno from the HWRM layer.
 */
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
	if (rc)
		return rc;

	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	return hwrm_req_send(bp, req);
}
4218
4219static int bnxt_test_irq(struct bnxt *bp)
4220{
4221	int i;
4222
4223	for (i = 0; i < bp->cp_nr_rings; i++) {
4224		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
4225		int rc;
4226
4227		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
4228		if (rc)
4229			return rc;
4230	}
4231	return 0;
4232}
4233
4234static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
4235{
4236	struct hwrm_port_mac_cfg_input *req;
4237	int rc;
4238
4239	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
4240	if (rc)
4241		return rc;
4242
4243	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
4244	if (enable)
4245		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
4246	else
4247		req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
4248	return hwrm_req_send(bp, req);
4249}
4250
/* Query firmware for the link speeds supported in forced (non-autoneg)
 * mode; on success the mask is stored in *@force_speeds.
 */
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp;
	struct hwrm_port_phy_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	hwrm_req_drop(bp, req);
	return rc;
}
4269
/* Force the link to a fixed speed before PHY loopback when autoneg is
 * enabled and firmware cannot do loopback with autoneg on.  Uses the
 * current link speed if the link is up, otherwise the fastest speed
 * supported in forced mode, falling back to 1Gb.
 *
 * The caller's @req is sent once here and then cleared so it can be
 * reused for the subsequent loopback configuration.
 */
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg ||
	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (BNXT_LINK_IS_UP(bp))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* reset the fields used above so the held request can be reused */
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}
4306
/* Enable or disable PHY loopback (internal, or external when @ext)
 * through HWRM_PORT_PHY_CFG.  When enabling, autoneg may first be
 * disabled via bnxt_disable_an_for_lpbk() using the same held request.
 */
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
	hwrm_req_hold(bp, req);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, req);
		if (ext)
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	rc = hwrm_req_send(bp, req);
	hwrm_req_drop(bp, req);
	return rc;
}
4333
/* Validate a received loopback frame against the pattern written by
 * bnxt_run_loopback(): the source MAC (bytes 6..11) must equal the
 * port MAC and the payload must be the i & 0xff byte ramp.
 *
 * Returns 0 on a matching frame, -EIO on any length or data mismatch.
 */
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	/* opaque carries the RX buffer index this completion refers to */
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	i = ETH_ALEN;
	/* check the source MAC field of the Ethernet header */
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for (  ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}
4366
/* Busy-poll the completion ring (up to 200 x 5us) waiting for the RX
 * completion of the looped-back test frame, then hand it to
 * bnxt_rx_loopback() for validation.
 *
 * Returns the validation result, or -EIO if no RX completion arrived
 * within the polling window.
 */
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
		    TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			/* advance twice: the RX completion occupies two
			 * ring entries
			 */
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
4402
/* Transmit one self-addressed test frame on TX ring 0 and poll RX
 * ring 0's completion ring for it.  The frame carries the port MAC as
 * both destination and source followed by an i & 0xff byte ramp.
 *
 * Returns 0 when the frame came back intact, otherwise a negative
 * errno.
 */
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	/* P5+ chips use a dedicated RX completion ring */
	cpr = &rxr->bnapi->cp_ring;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		cpr = rxr->rx_cpr;
	/* keep the frame within the RX copy threshold */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return rc;
}
4447
/* Ask firmware to execute the self-tests selected in @test_mask and
 * report the per-test pass bits in *@test_results.  Uses the timeout
 * the firmware advertised for its self-tests.
 */
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp;
	struct hwrm_selftest_exec_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, bp->test_info->timeout);
	req->flags = test_mask;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	*test_results = resp->test_success;
	hwrm_req_drop(bp, req);
	return rc;
}
4467
/* Driver-implemented self-tests appended after the firmware-reported
 * tests; they occupy the last BNXT_DRV_TESTS slots of the ethtool
 * test result array (bp->num_tests entries total).
 */
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
4473
/* ethtool -t handler: run firmware self-tests plus the driver's MAC,
 * PHY, external loopback and IRQ tests.  Offline tests close the NIC,
 * run the loopbacks in half-open state, and then fully reopen it.
 * buf[i] is set non-zero for each failed test, per ethtool convention.
 */
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	/* external loopback needs both the user's request and PHY support */
	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
			return;
		}
		offline = true;
	}

	/* select online tests always; offline-only tests when permitted */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		bnxt_ulp_stop(bp);
		bnxt_close_nic(bp, true, false);
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		/* assume failure until the loopback frame comes back */
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			bnxt_ulp_start(bp, rc);
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, true, true);
		bnxt_ulp_start(bp, rc);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* translate the firmware result bits into per-test failures */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
4569
/* ethtool --reset handler: request a full chip reset and/or an
 * application processor reset.  Clears the handled bits from *@flags;
 * a driver reload is advised when the reset is not hitless.
 */
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	bool reload = false;
	u32 req = *flags;

	if (!req)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev) &&
	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_chip(dev)) {
				netdev_info(dev, "Firmware reset request successful.\n");
				/* without hot reset the driver must be
				 * reloaded to recover
				 */
				if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
					reload = true;
				*flags &= ~BNXT_FW_RESET_CHIP;
			}
		} else if (req == BNXT_FW_RESET_CHIP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_ap(dev)) {
				netdev_info(dev, "Reset application processor successful.\n");
				reload = true;
				*flags &= ~BNXT_FW_RESET_AP;
			}
		} else if (req == BNXT_FW_RESET_AP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (reload)
		netdev_info(dev, "Reload driver to complete reset\n");

	return 0;
}
4623
4624static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
4625{
4626	struct bnxt *bp = netdev_priv(dev);
4627
4628	if (dump->flag > BNXT_DUMP_CRASH) {
4629		netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
4630		return -EINVAL;
4631	}
4632
4633	if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
4634		netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
4635		return -EOPNOTSUPP;
4636	}
4637
4638	bp->dump_flag = dump->flag;
4639	return 0;
4640}
4641
/* ethtool -w (no data) handler: report the selected dump type, its
 * expected length, and the firmware version packed maj.min.bld.rsvd
 * into the 32-bit version field.
 */
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
			bp->ver_resp.hwrm_fw_min_8b << 16 |
			bp->ver_resp.hwrm_fw_bld_8b << 8 |
			bp->ver_resp.hwrm_fw_rsvd_8b;

	dump->flag = bp->dump_flag;
	dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
	return 0;
}
4658
/* ethtool -w data handler: collect the coredump of the previously
 * selected type into @buf; dump->len is updated with the actual size.
 */
static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	dump->flag = bp->dump_flag;
	return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}
4672
/* ethtool -T handler: report timestamping capabilities.  Software
 * timestamping is always available; hardware capabilities and the PHC
 * index are added only when PTP is configured.
 */
static int bnxt_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp;

	ptp = bp->ptp_cfg;
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	/* -1 means no PHC is associated with this interface */
	info->phc_index = -1;
	if (!ptp)
		return 0;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
				 SOF_TIMESTAMPING_RX_HARDWARE |
				 SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp->ptp_clock)
		info->phc_index = ptp_clock_index(ptp->ptp_clock);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
		info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}
4704
/* One-time ethtool setup: read the NVM package version and query the
 * firmware self-test list, building the test name strings reported by
 * ethtool -t.  bp->num_tests stays 0 when self-tests are unavailable.
 */
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp;
	struct hwrm_selftest_qlist_input *req;
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
		return;

	/* allocate test_info once; reuse it across re-initializations */
	test_info = bp->test_info;
	if (!test_info) {
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
		if (!test_info)
			return;
		bp->test_info = test_info;
	}

	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto ethtool_init_exit;

	/* firmware tests plus the driver's own loopback/IRQ tests */
	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test_name[i];

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
				 fw_str, test_info->offline_mask & (1 << i) ?
					"offline" : "online");
		}
	}

ethtool_init_exit:
	hwrm_req_drop(bp, req);
}
4766
/* Standard ethtool PHY statistics, sourced from the extended RX port
 * stats (PF only).
 */
static void bnxt_get_eth_phy_stats(struct net_device *dev,
				   struct ethtool_eth_phy_stats *phy_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	phy_stats->SymbolErrorDuringCarrier =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
}
4780
/* Standard ethtool MAC statistics, sourced from the port counter block
 * (PF only).  TX counters live at a fixed byte offset past RX.
 */
static void bnxt_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	mac_stats->FramesReceivedOK =
		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
	mac_stats->FramesTransmittedOK =
		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
	mac_stats->FrameCheckSequenceErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
	mac_stats->AlignmentErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
	mac_stats->OutOfRangeLengthField =
		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
}
4804
/* Standard ethtool MAC control statistics, sourced from the port
 * counter block (PF only).
 */
static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
				    struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	ctrl_stats->MACControlFramesReceived =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
}
4818
/* Frame size buckets matching the hardware's per-size rx/tx frame
 * counters reported in bnxt_get_rmon_stats(); zero entry terminates.
 */
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  9216 },
	{ 9217, 16383 },
	{}
};
4832
/* Standard ethtool RMON statistics (PF only): error counters plus the
 * per-frame-size histograms, whose buckets correspond one-to-one with
 * bnxt_rmon_ranges above.
 */
static void bnxt_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	rmon_stats->jabbers =
		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
	rmon_stats->oversize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
	rmon_stats->undersize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);

	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
	rmon_stats->hist[4] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
	rmon_stats->hist[5] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
	rmon_stats->hist[6] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
	rmon_stats->hist[7] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
	rmon_stats->hist[8] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
	rmon_stats->hist[9] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);

	rmon_stats->hist_tx[0] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
	rmon_stats->hist_tx[1] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
	rmon_stats->hist_tx[2] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
	rmon_stats->hist_tx[3] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
	rmon_stats->hist_tx[4] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
	rmon_stats->hist_tx[5] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
	rmon_stats->hist_tx[6] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
	rmon_stats->hist_tx[7] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
	rmon_stats->hist_tx[8] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
	rmon_stats->hist_tx[9] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);

	*ranges = bnxt_rmon_ranges;
}
4893
/* Extended link statistics (link flap count), sourced from the
 * extended RX port stats (PF only).
 */
static void bnxt_get_link_ext_stats(struct net_device *dev,
				    struct ethtool_link_ext_stats *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	stats->link_down_events =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
}
4907
/* Free the self-test info allocated by bnxt_ethtool_init(). */
void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}
4913
/* ethtool entry points for the bnxt driver. */
const struct ethtool_ops bnxt_ethtool_ops = {
	.cap_link_lanes_supported	= 1,
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_USE_CQE,
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_fec_stats		= bnxt_get_fec_stats,
	.get_fecparam		= bnxt_get_fecparam,
	.set_fecparam		= bnxt_set_fecparam,
	.get_pause_stats	= bnxt_get_pause_stats,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_regs_len		= bnxt_get_regs_len,
	.get_regs		= bnxt_get_regs,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
	.set_rxfh		= bnxt_set_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_link_ext_stats	= bnxt_get_link_ext_stats,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.get_ts_info		= bnxt_get_ts_info,
	.reset			= bnxt_reset,
	.set_dump		= bnxt_set_dump,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
	.get_eth_phy_stats	= bnxt_get_eth_phy_stats,
	.get_eth_mac_stats	= bnxt_get_eth_mac_stats,
	.get_eth_ctrl_stats	= bnxt_get_eth_ctrl_stats,
	.get_rmon_stats		= bnxt_get_rmon_stats,
};